// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 int v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

		/* Not all VFs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore the return value on purpose - a given VF may fail,
		 * but we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}

/**
 * i40e_vc_link_speed2mbps
 * @link_speed: the speed to convert
 *
 * Converts i40e_aq_link_speed to an integer speed in Mbps and returns it.
 **/
static u32
i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)
{
	switch (link_speed) {
	case I40E_LINK_SPEED_100MB:
		return SPEED_100;
	case I40E_LINK_SPEED_1GB:
		return SPEED_1000;
	case I40E_LINK_SPEED_2_5GB:
		return SPEED_2500;
	case I40E_LINK_SPEED_5GB:
		return SPEED_5000;
	case I40E_LINK_SPEED_10GB:
		return SPEED_10000;
	case I40E_LINK_SPEED_20GB:
		return SPEED_20000;
	case I40E_LINK_SPEED_25GB:
		return SPEED_25000;
	case I40E_LINK_SPEED_40GB:
		return SPEED_40000;
	case I40E_LINK_SPEED_UNKNOWN:
		return SPEED_UNKNOWN;
	}
	return SPEED_UNKNOWN;
}

/**
 * i40e_set_vf_link_state
 * @vf: pointer to the VF structure
 * @pfe: pointer to PF event structure
 * @ls: pointer to link status structure
 *
 * set a link state on a single VF
 **/
static void i40e_set_vf_link_state(struct i40e_vf *vf,
				   struct virtchnl_pf_event *pfe,
				   struct i40e_link_status *ls)
{
	u8 link_status = ls->link_info & I40E_AQ_LINK_UP;

	if (vf->link_forced)
		link_status = vf->link_up;

	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_speed = link_status ?
			i40e_vc_link_speed2mbps(ls->link_speed) : 0;
		pfe->event_data.link_event_adv.link_status = link_status;
	} else {
		pfe->event_data.link_event.link_speed = link_status ?
			i40e_virtchnl_link_speed(ls->link_speed) : 0;
		pfe->event_data.link_event.link_status = link_status;
	}
}

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	i40e_set_vf_link_state(vf, &pfe, ls);

	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify that the VF is in either the init or active state before
	 * proceeding
	 */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}

/***********************misc routines*****************************/

/**
 * i40e_vc_reset_vf
 * @vf: pointer to the VF info
 * @notify_vf: whether to notify the VF about the reset
 *
 * Reset VF handler.
 **/
static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	if (notify_vf)
		i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset occurs after this function
	 * is called. However, we do not want to wait forever, so we'll give
	 * a reasonable amount of time and print a message if we fail to
	 * ensure a reset.
	 */
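	/* 20 attempts with a 10-20 ms sleep bound this loop at roughly
	 * 200-400 ms, which is where the "200 milliseconds" in the
	 * messages below comes from.
	 */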
	for (i = 0; i < 20; i++) {
		/* If the PF is releasing its VFs, resetting a VF is
		 * impossible, so bail out.
		 */
		if (test_bit(__I40E_VFS_RELEASING, pf->state))
			return;
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	if (notify_vf)
		dev_warn(&vf->pf->pdev->dev,
			 "Failed to initiate reset for VF %d after 200 milliseconds\n",
			 vf->vf_id);
	else
		dev_dbg(&vf->pf->pdev->dev,
			"Failed to initiate reset for VF %d after 200 milliseconds\n",
			vf->vf_id);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u16 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}

/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although the VF considers all the queues (1 to 16) as its
		 * own, they may actually belong to different VSIs (up to 4).
		 * We need to find out which queue belongs to which VSI.
		 */
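		/* e.g. with two TCs of 4 queue pairs each, VF queue_id 5
		 * resolves to queue 1 of the second TC's VSI.
		 */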
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find the right queue id relative to the given
			 * VSI.
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
			((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
			(vector_id - 1));
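	/* Example of the indexing above, with hypothetical values: for
	 * num_msix_vectors_vf = 5, VF 2 owns LNKLSTN indices 8-11 for
	 * vectors 1-4, while vector 0 always uses VPINT_LNKLST0(2).
	 */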

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}
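	/* linklistmap now interleaves the queue types per VSI queue: bit
	 * (I40E_VIRTCHNL_SUPPORTED_QTYPES * qid) marks the Rx queue and
	 * the bit directly above it the Tx queue of a given qid.
	 */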

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* the format of the RQCTL and TQCTL registers is the same */
		reg = (vector_id) |
		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		      BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* If the VF is running in polling mode and using interrupt zero,
	 * we need to disable auto-masking when enabling interrupt zero
	 * for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}

/**
 * i40e_release_rdma_qvlist
 * @vf: pointer to the VF.
 *
 * Release the RDMA queue/vector list info and unlink any CEQs from the
 * interrupt linked lists.
 **/
static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_rdma_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_rdma_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
				       >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
				      >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			      (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}

/**
 * i40e_config_rdma_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int
i40e_config_rdma_qvlist(struct i40e_vf *vf,
			struct virtchnl_rdma_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_rdma_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	size_t size;
	u32 msix_vf;
	int ret = 0;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	if (qvlist_info->num_vectors > msix_vf) {
		dev_warn(&pf->pdev->dev,
			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
			 qvlist_info->num_vectors,
			 msix_vf);
		ret = -EINVAL;
		goto err_out;
	}

	kfree(vf->qvlist_info);
	size = virtchnl_struct_size(vf->qvlist_info, qv_info,
				    qvlist_info->num_vectors);
	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info) {
		ret = -ENOMEM;
		goto err_out;
	}
	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;

		/* Validate that the vector id belongs to this VF */
		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
			ret = -EINVAL;
			goto err_free;
		}

		v_idx = qv_info->v_idx;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
			      I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			       (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			       (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			       (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			       (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			      (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			       (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			       (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err_free:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
err_out:
	return ret;
}

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
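	/* the ring base address is programmed in 128-byte units */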
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	int ret = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
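		/* hbuff holds the header buffer size in 64-byte units */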
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
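	/* dbuff holds the data buffer size in 128-byte units */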
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* if a port VLAN is configured, increase the max packet size */
	if (vsi->info.pvid)
		rx_ctx.rxmax += VLAN_HLEN;

	/* always enable 32-byte descriptors */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u64 max_tx_rate = 0;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
			     vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If a port VLAN was configured and the VF driver was then
		 * removed, the VSI port VLAN configuration was destroyed.
		 * Check if there is a port VLAN and restore the VSI
		 * configuration if needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program the mac filter only for the VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* store the VSI index and id for ADq; don't apply the mac filter */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate)
		max_tx_rate = vf->tx_rate;
	else if (vf->ch[idx].max_tx_rate)
		max_tx_rate = vf->ch[idx].max_tx_rate;

	if (max_tx_rate) {
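		/* the firmware takes the limit in I40E_BW_CREDIT_DIVISOR
		 * (50 Mbps) credits, e.g. a 300 Mbps cap becomes 6 credits
		 */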
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the first part, VSILAN_QTABLE, mapping PF queues
 * to the VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}
		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}

/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part, VPLAN_QTABLE, and completes the
 * VF mappings.
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qid;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
	}

	/* do the accounting and remove the additional ADq VSIs */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released, so don't
			 * release it again; only clear the values in the
			 * structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* the format is the same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						      + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* the format is the same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						     (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to the VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send the correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs. If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * HW.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
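/* VF_DEVICE_STATUS is the VF config-space offset of the PCIe Device Status
 * register as read through the CIAA/CIAD indirect access pair below, and
 * VF_TRANS_PENDING_MASK is its Transactions Pending bit (bit 5).
 */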
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

/**
 * __i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * called to get the number of VLANs offloaded on this VF
 **/
static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	u16 num_vlans = 0, bkt;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
			num_vlans++;
	}

	return num_vlans;
}

/**
 * i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
 **/
static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	int num_vlans;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	return num_vlans;
}

/**
 * i40e_get_vlan_list_sync
 * @vsi: pointer to the VSI
 * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
 * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
 *             This array is allocated here, but has to be freed by the
 *             caller.
 *
 * Called to get the number of VLANs and the VLAN list present in
 * mac_filter_hash.
 **/
static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
				    s16 **vlan_list)
{
	struct i40e_mac_filter *f;
	int i = 0;
	int bkt;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	*num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
	*vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
	if (!(*vlan_list))
		goto err;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
			continue;
		(*vlan_list)[i++] = f->vlan;
	}
err:
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
}

/**
 * i40e_set_vsi_promisc
 * @vf: pointer to the VF struct
 * @seid: VSI number
 * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
 *                for a given VLAN
 * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
 *                  for a given VLAN
 * @vl: List of VLANs - apply filter for given VLANs
 * @num_vlans: Number of elements in @vl
 **/
static int
i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
		     bool unicast_enable, s16 *vl, u16 num_vlans)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int aq_ret, aq_tmp = 0;
	int i;

	/* No VLAN to set promisc on, set on VSI */
	if (!num_vlans || !vl) {
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
							       multi_enable,
							       NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
				vf->vf_id,
				ERR_PTR(aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			return aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
							     unicast_enable,
							     NULL, true);

		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
				vf->vf_id,
				ERR_PTR(aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
		}

		return aq_ret;
	}

	for (i = 0; i < num_vlans; i++) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
							    multi_enable,
							    vl[i], NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
				vf->vf_id,
				ERR_PTR(aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			if (!aq_tmp)
				aq_tmp = aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
							    unicast_enable,
							    vl[i], NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
				vf->vf_id,
				ERR_PTR(aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			if (!aq_tmp)
				aq_tmp = aq_ret;
		}
	}

	if (aq_tmp)
		aq_ret = aq_tmp;

	return aq_ret;
}

/**
 * i40e_config_vf_promiscuous_mode
 * @vf: pointer to the VF info
 * @vsi_id: VSI id
 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
 *
 * Called from the VF to configure the promiscuous mode of
 * VF VSIs and from the VF reset path to reset promiscuous mode.
 **/
static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
					   u16 vsi_id,
					   bool allmulti,
					   bool alluni)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	int aq_ret = 0;
	u16 num_vlans;
	s16 *vl;

	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
		return -EINVAL;

	if (vf->port_vlan_id) {
		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
					      alluni, &vf->port_vlan_id, 1);
		return aq_ret;
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);

		if (!vl)
			return -ENOMEM;

		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
					      vl, num_vlans);
		kfree(vl);
		return aq_ret;
	}

	/* no VLANs to set on, set on VSI */
	aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
				      NULL, 0);
	return aq_ret;
}

/**
 * i40e_sync_vfr_reset
 * @hw: pointer to hw struct
 * @vf_id: VF identifier
 *
 * Before triggering a hardware reset, we need to know that no other process
 * has reserved the hardware for any reset operations. This check is done by
 * examining the status of the RSTAT1 register used to signal the reset.
 **/
static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
{
	u32 reg;
	int i;

	for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
		reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
		      I40E_VFINT_ICR0_ADMINQ_MASK;
		if (reg)
			return 0;

		usleep_range(100, 200);
	}

	return -EAGAIN;
}

/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;
	bool vf_active;
	u32 radq;

	/* warn the VF */
	vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give some time to finish to any VF config
	 * functions that may still be running at this point.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* Sync the VFR reset before triggering the next one */
		radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
		       I40E_VFINT_ICR0_ADMINQ_MASK;
		if (vf_active && !radq)
			/* wait for the VF driver to finish the reset */
			if (i40e_sync_vfr_reset(hw, vf->vf_id))
				dev_info(&pf->pdev->dev,
					 "Reset VF %d never finished\n",
					 vf->vf_id);

		/* Reset the VF using the VPGEN_VFRTRIG reg. This also sets
		 * the reset-in-progress state in the RSTAT1 register.
		 */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
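	/* e.g. absolute VF id 40 lands in GLGEN_VFLRSTAT(1), bit 8 */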
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}

/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset finished properly, and to have ensured
 * the minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* disable promisc modes in case they were enabled */
	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into the Guest OS kernel log, so best
	 * avoid it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after the VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is in reset, resets successfully, or resets
 * are disabled, and false otherwise.
 **/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
		return true;

	/* Bail out if VFs are disabled. */
	if (test_bit(__I40E_VF_DISABLE, pf->state))
		return true;

	/* If the VF is already being reset we don't need to continue. */
	if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
		return true;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	usleep_range(20000, 40000);
	clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);

	return true;
}

/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		vf = &pf->vf[v];
		/* If the VF is being reset no need to trigger a reset again */
		if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
			i40e_trigger_vf_reset(&pf->vf[v], flr);
	}

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF that
		 * failed the previous check.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
				reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
				if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
					break;
			}

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		/* If the VF is reset in another thread just continue */
		if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		/* If the VF is reset in another thread just continue */
		if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* HW may need up to 50 ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* If the VF is reset in another thread just continue */
		if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
			continue;

		i40e_cleanup_reset_vf(&pf->vf[v]);
	}

	i40e_flush(hw);
	usleep_range(20000, 40000);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}
1712
1713 /**
1714 * i40e_free_vfs
1715 * @pf: pointer to the PF structure
1716 *
1717 * free VF resources
1718 **/
1719 void i40e_free_vfs(struct i40e_pf *pf)
1720 {
1721 struct i40e_hw *hw = &pf->hw;
1722 u32 reg_idx, bit_idx;
1723 int i, tmp, vf_id;
1724
1725 if (!pf->vf)
1726 return;
1727
1728 set_bit(__I40E_VFS_RELEASING, pf->state);
1729 while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1730 usleep_range(1000, 2000);
1731
1732 i40e_notify_client_of_vf_enable(pf, 0);
1733
1734 /* Disable IOV before freeing resources. This lets any VF drivers
1735 * running in the host get themselves cleaned up before we yank
1736 * the carpet out from underneath their feet.
1737 */
1738 if (!pci_vfs_assigned(pf->pdev))
1739 pci_disable_sriov(pf->pdev);
1740 else
1741 dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
1742
1743 /* Amortize wait time by stopping all VFs at the same time */
1744 for (i = 0; i < pf->num_alloc_vfs; i++) {
1745 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1746 continue;
1747
1748 i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
1749 }
1750
1751 for (i = 0; i < pf->num_alloc_vfs; i++) {
1752 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1753 continue;
1754
1755 i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
1756 }
1757
1758 /* free up VF resources */
1759 tmp = pf->num_alloc_vfs;
1760 pf->num_alloc_vfs = 0;
1761 for (i = 0; i < tmp; i++) {
1762 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1763 i40e_free_vf_res(&pf->vf[i]);
1764 /* disable qp mappings */
1765 i40e_disable_vf_mappings(&pf->vf[i]);
1766 }
1767
1768 kfree(pf->vf);
1769 pf->vf = NULL;
1770
1771 /* This check is for when the driver is unloaded while VFs are
1772 * assigned. Setting the number of VFs to 0 through sysfs is caught
1773 * before this function ever gets called.
1774 */
1775 if (!pci_vfs_assigned(pf->pdev)) {
1776 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
1777 * work correctly when SR-IOV gets re-enabled.
1778 */
1779 for (vf_id = 0; vf_id < tmp; vf_id++) {
1780 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1781 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1782 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1783 }
1784 }
1785 clear_bit(__I40E_VF_DISABLE, pf->state);
1786 clear_bit(__I40E_VFS_RELEASING, pf->state);
1787 }
1788
1789 #ifdef CONFIG_PCI_IOV
1790 /**
1791 * i40e_alloc_vfs
1792 * @pf: pointer to the PF structure
1793 * @num_alloc_vfs: number of VFs to allocate
1794 *
1795 * allocate VF resources
1796 **/
1797 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
1798 {
1799 struct i40e_vf *vfs;
1800 int i, ret = 0;
1801
1802 /* Disable interrupt 0 so we don't try to handle the VFLR. */
1803 i40e_irq_dynamic_disable_icr0(pf);
1804
1805 /* Check to see if we're just allocating resources for extant VFs */
1806 if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
1807 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
1808 if (ret) {
1809 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1810 pf->num_alloc_vfs = 0;
1811 goto err_iov;
1812 }
1813 }
1814 /* allocate memory */
1815 vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
1816 if (!vfs) {
1817 ret = -ENOMEM;
1818 goto err_alloc;
1819 }
1820 pf->vf = vfs;
1821
1822 /* apply default profile */
1823 for (i = 0; i < num_alloc_vfs; i++) {
1824 vfs[i].pf = pf;
1825 vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
1826 vfs[i].vf_id = i;
1827
1828 /* assign default capabilities */
1829 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1830 vfs[i].spoofchk = true;
1831
1832 set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
1833
1834 }
1835 pf->num_alloc_vfs = num_alloc_vfs;
1836
1837 /* VF resources get allocated during reset */
1838 i40e_reset_all_vfs(pf, false);
1839
1840 i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
1841
1842 err_alloc:
1843 if (ret)
1844 i40e_free_vfs(pf);
1845 err_iov:
1846 /* Re-enable interrupt 0. */
1847 i40e_irq_dynamic_enable_icr0(pf);
1848 return ret;
1849 }
1850
1851 #endif
1852 /**
1853 * i40e_pci_sriov_enable
1854 * @pdev: pointer to a pci_dev structure
1855 * @num_vfs: number of VFs to allocate
1856 *
1857 * Enable or change the number of VFs
1858 **/
1859 static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
1860 {
1861 #ifdef CONFIG_PCI_IOV
1862 struct i40e_pf *pf = pci_get_drvdata(pdev);
1863 int pre_existing_vfs = pci_num_vf(pdev);
1864 int err = 0;
1865
1866 if (test_bit(__I40E_TESTING, pf->state)) {
1867 dev_warn(&pdev->dev,
1868 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
1869 err = -EPERM;
1870 goto err_out;
1871 }
1872
1873 if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1874 i40e_free_vfs(pf);
1875 else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1876 goto out;
1877
1878 if (num_vfs > pf->num_req_vfs) {
1879 dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
1880 num_vfs, pf->num_req_vfs);
1881 err = -EPERM;
1882 goto err_out;
1883 }
1884
1885 dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
1886 err = i40e_alloc_vfs(pf, num_vfs);
1887 if (err) {
1888 dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
1889 goto err_out;
1890 }
1891
1892 out:
1893 return num_vfs;
1894
1895 err_out:
1896 return err;
1897 #endif
1898 return 0;
1899 }
1900
1901 /**
1902 * i40e_pci_sriov_configure
1903 * @pdev: pointer to a pci_dev structure
1904 * @num_vfs: number of VFs to allocate
1905 *
1906 * Enable or change the number of VFs. Called when the user updates the number
1907 * of VFs in sysfs.
1908 **/
1909 int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1910 {
1911 struct i40e_pf *pf = pci_get_drvdata(pdev);
1912 int ret = 0;
1913
1914 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
1915 dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
1916 return -EAGAIN;
1917 }
1918
1919 if (num_vfs) {
1920 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
1921 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
1922 i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
1923 }
1924 ret = i40e_pci_sriov_enable(pdev, num_vfs);
1925 goto sriov_configure_out;
1926 }
1927
1928 if (!pci_vfs_assigned(pf->pdev)) {
1929 i40e_free_vfs(pf);
1930 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1931 i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
1932 } else {
1933 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
1934 ret = -EINVAL;
1935 goto sriov_configure_out;
1936 }
1937 sriov_configure_out:
1938 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
1939 return ret;
1940 }
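/* Editorial usage note: i40e_pci_sriov_configure() is wired up as the
 * driver's pci_driver.sriov_configure callback, so the paths above are
 * normally exercised through the standard SR-IOV sysfs attribute, e.g.:
 *
 *   echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs   (allocate 4 VFs)
 *   echo 0 > /sys/bus/pci/devices/<BDF>/sriov_numvfs   (free them again)
 *
 * where <BDF> stands for the PF's PCI address. A nonzero count lands in
 * i40e_pci_sriov_enable(); zero frees the VFs unless some are still
 * assigned to VMs.
 */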
1941
1942 /***********************virtual channel routines******************/
1943
1944 /**
1945 * i40e_vc_send_msg_to_vf
1946 * @vf: pointer to the VF info
1947 * @v_opcode: virtual channel opcode
1948 * @v_retval: virtual channel return value
1949 * @msg: pointer to the msg buffer
1950 * @msglen: msg length
1951 *
1952 * send msg to VF
1953 **/
1954 static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1955 u32 v_retval, u8 *msg, u16 msglen)
1956 {
1957 struct i40e_pf *pf;
1958 struct i40e_hw *hw;
1959 int abs_vf_id;
1960 int aq_ret;
1961
1962 /* validate the request */
1963 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1964 return -EINVAL;
1965
1966 pf = vf->pf;
1967 hw = &pf->hw;
1968 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1969
1970 aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
1971 msg, msglen, NULL);
1972 if (aq_ret) {
1973 dev_info(&pf->pdev->dev,
1974 "Unable to send the message to VF %d aq_err %d\n",
1975 vf->vf_id, pf->hw.aq.asq_last_status);
1976 return -EIO;
1977 }
1978
1979 return 0;
1980 }
1981
1982 /**
1983 * i40e_vc_send_resp_to_vf
1984 * @vf: pointer to the VF info
1985 * @opcode: operation code
1986 * @retval: return value
1987 *
1988 * send resp msg to VF
1989 **/
1990 static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
1991 enum virtchnl_ops opcode,
1992 int retval)
1993 {
1994 return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
1995 }
1996
1997 /**
1998 * i40e_sync_vf_state
1999 * @vf: pointer to the VF info
2000 * @state: VF state
2001 *
2002 * Called from a VF message to synchronize the service with a potential
2003 * VF reset state
2004 **/
2005 static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
2006 {
2007 int i;
2008
2009 /* Handling some messages requires the relevant VF state bit to be set.
2010 * It is possible that this bit is cleared during a VF reset, so
2011 * wait until the end of the reset before concluding that the
2012 * request message really arrived in a bad state.
2013 */
2014 for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
2015 if (test_bit(state, &vf->vf_states))
2016 return true;
2017 usleep_range(10000, 20000);
2018 }
2019
2020 return test_bit(state, &vf->vf_states);
2021 }
2022
2023 /**
2024 * i40e_vc_get_version_msg
2025 * @vf: pointer to the VF info
2026 * @msg: pointer to the msg buffer
2027 *
2028 * called from the VF to request the API version used by the PF
2029 **/
2030 static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
2031 {
2032 struct virtchnl_version_info info = {
2033 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
2034 };
2035
2036 vf->vf_ver = *(struct virtchnl_version_info *)msg;
2037 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
2038 if (VF_IS_V10(&vf->vf_ver))
2039 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
2040 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
2041 0, (u8 *)&info,
2042 sizeof(struct virtchnl_version_info));
2043 }
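/* Worked example (editorial): a VF reporting {major = 1, minor = 0}
 * matches VF_IS_V10(), so the PF answers with the minor forced to
 * VIRTCHNL_VERSION_MINOR_NO_VF_CAPS and the 1.0 VF never sees the
 * capability-negotiation minor it cannot handle. A 1.1+ VF receives the
 * PF's full version and is expected to follow up with
 * VIRTCHNL_OP_GET_VF_RESOURCES carrying its driver_caps word (see
 * i40e_vc_get_vf_resources_msg() below).
 */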
2044
2045 /**
2046 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
2047 * @vf: pointer to VF structure
2048 **/
2049 static void i40e_del_qch(struct i40e_vf *vf)
2050 {
2051 struct i40e_pf *pf = vf->pf;
2052 int i;
2053
2054 /* The first element in the array belongs to the primary VF VSI and we must
2055 * not delete it. We should, however, delete the rest of the VSIs created.
2056 */
2057 for (i = 1; i < vf->num_tc; i++) {
2058 if (vf->ch[i].vsi_idx) {
2059 i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
2060 vf->ch[i].vsi_idx = 0;
2061 vf->ch[i].vsi_id = 0;
2062 }
2063 }
2064 }
2065
2066 /**
2067 * i40e_vc_get_max_frame_size
2068 * @vf: pointer to the VF
2069 *
2070 * Max frame size is determined based on the current port's max frame size and
2071 * whether a port VLAN is configured on this VF. The VF is not aware whether
2072 * it's in a port VLAN, so the PF needs to account for this both in max frame
2073 * size checks and when sending the max frame size to the VF.
2074 **/
2075 static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
2076 {
2077 u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
2078
2079 if (vf->port_vlan_id)
2080 max_frame_size -= VLAN_HLEN;
2081
2082 return max_frame_size;
2083 }
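/* Worked example (editorial): with a port max frame size of 9728 bytes
 * and a port VLAN configured on the VF, the VF is told
 * 9728 - VLAN_HLEN = 9724, leaving room for the 4-byte tag the PF
 * inserts behind the VF's back. Without a port VLAN the port value is
 * passed through unchanged.
 */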
2084
2085 /**
2086 * i40e_vc_get_vf_resources_msg
2087 * @vf: pointer to the VF info
2088 * @msg: pointer to the msg buffer
2089 *
2090 * called from the VF to request its resources
2091 **/
2092 static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
2093 {
2094 struct virtchnl_vf_resource *vfres = NULL;
2095 struct i40e_pf *pf = vf->pf;
2096 struct i40e_vsi *vsi;
2097 int num_vsis = 1;
2098 int aq_ret = 0;
2099 size_t len = 0;
2100 int ret;
2101
2102 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
2103 aq_ret = -EINVAL;
2104 goto err;
2105 }
2106
2107 len = virtchnl_struct_size(vfres, vsi_res, num_vsis);
2108 vfres = kzalloc(len, GFP_KERNEL);
2109 if (!vfres) {
2110 aq_ret = -ENOMEM;
2111 len = 0;
2112 goto err;
2113 }
2114 if (VF_IS_V11(&vf->vf_ver))
2115 vf->driver_caps = *(u32 *)msg;
2116 else
2117 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
2118 VIRTCHNL_VF_OFFLOAD_RSS_REG |
2119 VIRTCHNL_VF_OFFLOAD_VLAN;
2120
2121 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
2122 vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
2123 vsi = pf->vsi[vf->lan_vsi_idx];
2124 if (!vsi->info.pvid)
2125 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
2126
2127 if (i40e_vf_client_capable(pf, vf->vf_id) &&
2128 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RDMA)) {
2129 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RDMA;
2130 set_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
2131 } else {
2132 clear_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
2133 }
2134
2135 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2136 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
2137 } else {
2138 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
2139 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
2140 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
2141 else
2142 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
2143 }
2144
2145 if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
2146 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
2147 vfres->vf_cap_flags |=
2148 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
2149 }
2150
2151 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
2152 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
2153
2154 if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
2155 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
2156 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
2157
2158 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
2159 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
2160 dev_err(&pf->pdev->dev,
2161 "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
2162 vf->vf_id);
2163 aq_ret = -EINVAL;
2164 goto err;
2165 }
2166 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
2167 }
2168
2169 if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
2170 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2171 vfres->vf_cap_flags |=
2172 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
2173 }
2174
2175 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
2176 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
2177
2178 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
2179 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;
2180
2181 vfres->num_vsis = num_vsis;
2182 vfres->num_queue_pairs = vf->num_queue_pairs;
2183 vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
2184 vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
2185 vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
2186 vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
2187
2188 if (vf->lan_vsi_idx) {
2189 vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
2190 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
2191 vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
2192 /* VFs only use TC 0 */
2193 vfres->vsi_res[0].qset_handle
2194 = le16_to_cpu(vsi->info.qs_handle[0]);
2195 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
2196 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
2197 eth_zero_addr(vf->default_lan_addr.addr);
2198 }
2199 ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
2200 vf->default_lan_addr.addr);
2201 }
2202 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
2203
2204 err:
2205 /* send the response back to the VF */
2206 ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
2207 aq_ret, (u8 *)vfres, len);
2208
2209 kfree(vfres);
2210 return ret;
2211 }
2212
2213 /**
2214 * i40e_vc_config_promiscuous_mode_msg
2215 * @vf: pointer to the VF info
2216 * @msg: pointer to the msg buffer
2217 *
2218 * called from the VF to configure the promiscuous mode of
2219 * VF vsis
2220 **/
2221 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
2222 {
2223 struct virtchnl_promisc_info *info =
2224 (struct virtchnl_promisc_info *)msg;
2225 struct i40e_pf *pf = vf->pf;
2226 bool allmulti = false;
2227 bool alluni = false;
2228 int aq_ret = 0;
2229
2230 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2231 aq_ret = -EINVAL;
2232 goto err_out;
2233 }
2234 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2235 dev_err(&pf->pdev->dev,
2236 "Unprivileged VF %d is attempting to configure promiscuous mode\n",
2237 vf->vf_id);
2238
2239 /* Lie to the VF on purpose, because this is an error we can
2240 * ignore. Unprivileged VF is not a virtual channel error.
2241 */
2242 aq_ret = 0;
2243 goto err_out;
2244 }
2245
2246 if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
2247 aq_ret = -EINVAL;
2248 goto err_out;
2249 }
2250
2251 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2252 aq_ret = -EINVAL;
2253 goto err_out;
2254 }
2255
2256 /* Multicast promiscuous handling */
2257 if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2258 allmulti = true;
2259
2260 if (info->flags & FLAG_VF_UNICAST_PROMISC)
2261 alluni = true;
2262 aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
2263 alluni);
2264 if (aq_ret)
2265 goto err_out;
2266
2267 if (allmulti) {
2268 if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
2269 &vf->vf_states))
2270 dev_info(&pf->pdev->dev,
2271 "VF %d successfully set multicast promiscuous mode\n",
2272 vf->vf_id);
2273 } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
2274 &vf->vf_states))
2275 dev_info(&pf->pdev->dev,
2276 "VF %d successfully unset multicast promiscuous mode\n",
2277 vf->vf_id);
2278
2279 if (alluni) {
2280 if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
2281 &vf->vf_states))
2282 dev_info(&pf->pdev->dev,
2283 "VF %d successfully set unicast promiscuous mode\n",
2284 vf->vf_id);
2285 } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
2286 &vf->vf_states))
2287 dev_info(&pf->pdev->dev,
2288 "VF %d successfully unset unicast promiscuous mode\n",
2289 vf->vf_id);
2290
2291 err_out:
2292 /* send the response to the VF */
2293 return i40e_vc_send_resp_to_vf(vf,
2294 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2295 aq_ret);
2296 }
2297
2298 /**
2299 * i40e_vc_config_queues_msg
2300 * @vf: pointer to the VF info
2301 * @msg: pointer to the msg buffer
2302 *
2303 * called from the VF to configure the rx/tx
2304 * queues
2305 **/
2306 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
2307 {
2308 struct virtchnl_vsi_queue_config_info *qci =
2309 (struct virtchnl_vsi_queue_config_info *)msg;
2310 struct virtchnl_queue_pair_info *qpi;
2311 u16 vsi_id, vsi_queue_id = 0;
2312 struct i40e_pf *pf = vf->pf;
2313 int i, j = 0, idx = 0;
2314 struct i40e_vsi *vsi;
2315 u16 num_qps_all = 0;
2316 int aq_ret = 0;
2317
2318 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2319 aq_ret = -EINVAL;
2320 goto error_param;
2321 }
2322
2323 if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2324 aq_ret = -EINVAL;
2325 goto error_param;
2326 }
2327
2328 if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
2329 aq_ret = -EINVAL;
2330 goto error_param;
2331 }
2332
2333 if (vf->adq_enabled) {
2334 for (i = 0; i < vf->num_tc; i++)
2335 num_qps_all += vf->ch[i].num_qps;
2336 if (num_qps_all != qci->num_queue_pairs) {
2337 aq_ret = -EINVAL;
2338 goto error_param;
2339 }
2340 }
2341
2342 vsi_id = qci->vsi_id;
2343
2344 for (i = 0; i < qci->num_queue_pairs; i++) {
2345 qpi = &qci->qpair[i];
2346
2347 if (!vf->adq_enabled) {
2348 if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
2349 qpi->txq.queue_id)) {
2350 aq_ret = -EINVAL;
2351 goto error_param;
2352 }
2353
2354 vsi_queue_id = qpi->txq.queue_id;
2355
2356 if (qpi->txq.vsi_id != qci->vsi_id ||
2357 qpi->rxq.vsi_id != qci->vsi_id ||
2358 qpi->rxq.queue_id != vsi_queue_id) {
2359 aq_ret = -EINVAL;
2360 goto error_param;
2361 }
2362 }
2363
2364 if (vf->adq_enabled) {
2365 if (idx >= ARRAY_SIZE(vf->ch)) {
2366 aq_ret = -ENODEV;
2367 goto error_param;
2368 }
2369 vsi_id = vf->ch[idx].vsi_id;
2370 }
2371
2372 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
2373 &qpi->rxq) ||
2374 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
2375 &qpi->txq)) {
2376 aq_ret = -EINVAL;
2377 goto error_param;
2378 }
2379
2380 /* For ADq there can be up to 4 VSIs with max 4 queues each.
2381 * The VF does not know about these additional VSIs and all
2382 * it cares about is its own queues. The PF configures these
2383 * queues on the appropriate VSIs based on the TC mapping.
2384 */
2385 if (vf->adq_enabled) {
2386 if (idx >= ARRAY_SIZE(vf->ch)) {
2387 aq_ret = -ENODEV;
2388 goto error_param;
2389 }
2390 if (j == (vf->ch[idx].num_qps - 1)) {
2391 idx++;
2392 j = 0; /* resetting the queue count */
2393 vsi_queue_id = 0;
2394 } else {
2395 j++;
2396 vsi_queue_id++;
2397 }
2398 }
2399 }
2400 /* set vsi num_queue_pairs in use to num configured by VF */
2401 if (!vf->adq_enabled) {
2402 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2403 qci->num_queue_pairs;
2404 } else {
2405 for (i = 0; i < vf->num_tc; i++) {
2406 vsi = pf->vsi[vf->ch[i].vsi_idx];
2407 vsi->num_queue_pairs = vf->ch[i].num_qps;
2408
2409 if (i40e_update_adq_vsi_queues(vsi, i)) {
2410 aq_ret = -EIO;
2411 goto error_param;
2412 }
2413 }
2414 }
2415
2416 error_param:
2417 /* send the response to the VF */
2418 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
2419 aq_ret);
2420 }
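/* Worked example (editorial): with ADq enabled and two TCs of two queue
 * pairs each (ch[0].num_qps = ch[1].num_qps = 2), the loop above visits
 * qci->qpair[0..3] as
 *
 *   i = 0 -> ch[0], vsi_queue_id 0      i = 2 -> ch[1], vsi_queue_id 0
 *   i = 1 -> ch[0], vsi_queue_id 1      i = 3 -> ch[1], vsi_queue_id 1
 *
 * i.e. the VF's flat queue list is re-based per channel VSI, matching
 * the TC-to-VSI fan-out described in the comment inside the loop.
 */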
2421
2422 /**
2423 * i40e_validate_queue_map - check queue map is valid
2424 * @vf: the VF structure pointer
2425 * @vsi_id: vsi id
2426 * @queuemap: Tx or Rx queue map
2427 *
2428 * check if Tx or Rx queue map is valid
2429 **/
2430 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
2431 unsigned long queuemap)
2432 {
2433 u16 vsi_queue_id, queue_id;
2434
2435 for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
2436 if (vf->adq_enabled) {
2437 vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
2438 queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
2439 } else {
2440 queue_id = vsi_queue_id;
2441 }
2442
2443 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
2444 return -EINVAL;
2445 }
2446
2447 return 0;
2448 }
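/* Worked example (editorial): in ADq mode a VF-relative queue index is
 * fanned out across the per-TC VSIs. Assuming I40E_MAX_VF_VSI and
 * I40E_DEFAULT_QUEUES_PER_VF are both 4 (their values elsewhere in this
 * driver), bit 5 of a queue map resolves to
 *
 *   vsi_id   = vf->ch[5 / 4].vsi_id   -> channel 1
 *   queue_id = 5 % 4                  -> queue 1 of that VSI
 *
 * so vector maps keep using flat VF queue numbering while the PF splits
 * the bits across the hidden ADq VSIs.
 */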
2449
2450 /**
2451 * i40e_vc_config_irq_map_msg
2452 * @vf: pointer to the VF info
2453 * @msg: pointer to the msg buffer
2454 *
2455 * called from the VF to configure the irq to
2456 * queue map
2457 **/
2458 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
2459 {
2460 struct virtchnl_irq_map_info *irqmap_info =
2461 (struct virtchnl_irq_map_info *)msg;
2462 struct virtchnl_vector_map *map;
2463 int aq_ret = 0;
2464 u16 vsi_id;
2465 int i;
2466
2467 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2468 aq_ret = -EINVAL;
2469 goto error_param;
2470 }
2471
2472 if (irqmap_info->num_vectors >
2473 vf->pf->hw.func_caps.num_msix_vectors_vf) {
2474 aq_ret = -EINVAL;
2475 goto error_param;
2476 }
2477
2478 for (i = 0; i < irqmap_info->num_vectors; i++) {
2479 map = &irqmap_info->vecmap[i];
2480 /* validate msg params */
2481 if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
2482 !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
2483 aq_ret = -EINVAL;
2484 goto error_param;
2485 }
2486 vsi_id = map->vsi_id;
2487
2488 if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2489 aq_ret = -EINVAL;
2490 goto error_param;
2491 }
2492
2493 if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2494 aq_ret = -EINVAL;
2495 goto error_param;
2496 }
2497
2498 i40e_config_irq_link_list(vf, vsi_id, map);
2499 }
2500 error_param:
2501 /* send the response to the VF */
2502 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
2503 aq_ret);
2504 }
2505
2506 /**
2507 * i40e_ctrl_vf_tx_rings
2508 * @vsi: the SRIOV VSI being configured
2509 * @q_map: bit map of the queues to be enabled
2510 * @enable: start or stop the queue
2511 **/
2512 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2513 bool enable)
2514 {
2515 struct i40e_pf *pf = vsi->back;
2516 int ret = 0;
2517 u16 q_id;
2518
2519 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2520 ret = i40e_control_wait_tx_q(vsi->seid, pf,
2521 vsi->base_queue + q_id,
2522 false /*is xdp*/, enable);
2523 if (ret)
2524 break;
2525 }
2526 return ret;
2527 }
2528
2529 /**
2530 * i40e_ctrl_vf_rx_rings
2531 * @vsi: the SRIOV VSI being configured
2532 * @q_map: bit map of the queues to be enabled
2533 * @enable: start or stop the queue
2534 **/
2535 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2536 bool enable)
2537 {
2538 struct i40e_pf *pf = vsi->back;
2539 int ret = 0;
2540 u16 q_id;
2541
2542 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2543 ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2544 enable);
2545 if (ret)
2546 break;
2547 }
2548 return ret;
2549 }
2550
2551 /**
2552 * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
2553 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2554 *
2555 * Returns true if validation was successful, else false.
2556 **/
2557 static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2558 {
2559 if ((!vqs->rx_queues && !vqs->tx_queues) ||
2560 vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
2561 vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
2562 return false;
2563
2564 return true;
2565 }
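/* Minimal standalone sketch (editorial) of the same predicate, handy
 * when reasoning about which bitmaps the PF accepts. MAX_VF_QUEUES
 * stands in for I40E_MAX_VF_QUEUES (16):
 *
 *   #include <stdbool.h>
 *   #include <stdint.h>
 *
 *   #define MAX_VF_QUEUES 16
 *
 *   static bool vqs_bitmaps_ok(uint32_t rx, uint32_t tx)
 *   {
 *           if (!rx && !tx)         // at least one queue must be named
 *                   return false;
 *           return rx < (1u << MAX_VF_QUEUES) &&    // no bit >= 16
 *                  tx < (1u << MAX_VF_QUEUES);
 *   }
 *
 * e.g. vqs_bitmaps_ok(0x0003, 0x0003) is true (queues 0-1 on both
 * sides), while vqs_bitmaps_ok(0x10000, 0) is false: bit 16 is out of
 * range for a 16-queue VF.
 */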
2566
2567 /**
2568 * i40e_vc_enable_queues_msg
2569 * @vf: pointer to the VF info
2570 * @msg: pointer to the msg buffer
2571 *
2572 * called from the VF to enable all or specific queue(s)
2573 **/
2574 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
2575 {
2576 struct virtchnl_queue_select *vqs =
2577 (struct virtchnl_queue_select *)msg;
2578 struct i40e_pf *pf = vf->pf;
2579 int aq_ret = 0;
2580 int i;
2581
2582 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2583 aq_ret = -EINVAL;
2584 goto error_param;
2585 }
2586
2587 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2588 aq_ret = -EINVAL;
2589 goto error_param;
2590 }
2591
2592 if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2593 aq_ret = -EINVAL;
2594 goto error_param;
2595 }
2596
2597 /* Use the queue bit map sent by the VF */
2598 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2599 true)) {
2600 aq_ret = -EIO;
2601 goto error_param;
2602 }
2603 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2604 true)) {
2605 aq_ret = -EIO;
2606 goto error_param;
2607 }
2608
2609 /* need to start the rings for the additional ADq VSIs as well */
2610 if (vf->adq_enabled) {
2611 /* zero belongs to LAN VSI */
2612 for (i = 1; i < vf->num_tc; i++) {
2613 if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2614 aq_ret = -EIO;
2615 }
2616 }
2617
2618 error_param:
2619 /* send the response to the VF */
2620 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2621 aq_ret);
2622 }
2623
2624 /**
2625 * i40e_vc_disable_queues_msg
2626 * @vf: pointer to the VF info
2627 * @msg: pointer to the msg buffer
2628 *
2629 * called from the VF to disable all or specific
2630 * queue(s)
2631 **/
2632 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
2633 {
2634 struct virtchnl_queue_select *vqs =
2635 (struct virtchnl_queue_select *)msg;
2636 struct i40e_pf *pf = vf->pf;
2637 int aq_ret = 0;
2638
2639 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2640 aq_ret = -EINVAL;
2641 goto error_param;
2642 }
2643
2644 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2645 aq_ret = -EINVAL;
2646 goto error_param;
2647 }
2648
2649 if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2650 aq_ret = -EINVAL;
2651 goto error_param;
2652 }
2653
2654 /* Use the queue bit map sent by the VF */
2655 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2656 false)) {
2657 aq_ret = -EIO;
2658 goto error_param;
2659 }
2660 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2661 false)) {
2662 aq_ret = -EIO;
2663 goto error_param;
2664 }
2665 error_param:
2666 /* send the response to the VF */
2667 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2668 aq_ret);
2669 }
2670
2671 /**
2672 * i40e_check_enough_queue - check for a big enough run of free queues
2673 * @vf: pointer to the VF info
2674 * @needed: the number of queue pairs needed
2675 *
2676 * Returns a non-negative index when enough queues are available, or -ENOMEM
2677 **/
2678 static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
2679 {
2680 unsigned int i, cur_queues, more, pool_size;
2681 struct i40e_lump_tracking *pile;
2682 struct i40e_pf *pf = vf->pf;
2683 struct i40e_vsi *vsi;
2684
2685 vsi = pf->vsi[vf->lan_vsi_idx];
2686 cur_queues = vsi->alloc_queue_pairs;
2687
2688 /* if the currently allocated queues are already enough */
2689 if (cur_queues >= needed)
2690 return vsi->base_queue;
2691
2692 pile = pf->qp_pile;
2693 if (cur_queues > 0) {
2694 /* if some queues are already allocated, just check
2695 * whether enough free entries follow immediately
2696 * behind the allocated block.
2697 */
2698 more = needed - cur_queues;
2699 for (i = vsi->base_queue + cur_queues;
2700 i < pile->num_entries; i++) {
2701 if (pile->list[i] & I40E_PILE_VALID_BIT)
2702 break;
2703
2704 if (more-- == 1)
2705 /* there is enough */
2706 return vsi->base_queue;
2707 }
2708 }
2709
2710 pool_size = 0;
2711 for (i = 0; i < pile->num_entries; i++) {
2712 if (pile->list[i] & I40E_PILE_VALID_BIT) {
2713 pool_size = 0;
2714 continue;
2715 }
2716 if (needed <= ++pool_size)
2717 /* there is enough */
2718 return i;
2719 }
2720
2721 return -ENOMEM;
2722 }
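/* Editorial sketch of the second scan above: find a run of 'needed'
 * consecutive free entries in a used/free map. Standalone C, with a
 * plain bool array standing in for the qp_pile and its
 * I40E_PILE_VALID_BIT:
 *
 *   #include <stdbool.h>
 *   #include <stddef.h>
 *
 *   static int find_free_run(const bool *used, size_t n, size_t needed)
 *   {
 *           size_t i, run = 0;
 *
 *           for (i = 0; i < n; i++) {
 *                   if (used[i]) {
 *                           run = 0;        // run broken, start over
 *                           continue;
 *                   }
 *                   if (++run >= needed)
 *                           return (int)i;  // index of the run's last entry
 *           }
 *           return -1;
 *   }
 *
 * Like the driver's loop, this returns the index at which the count was
 * satisfied rather than the base of the run; the caller in
 * i40e_vc_request_queues_msg() only tests the sign, so that is enough.
 */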
2723
2724 /**
2725 * i40e_vc_request_queues_msg
2726 * @vf: pointer to the VF info
2727 * @msg: pointer to the msg buffer
2728 *
2729 * VFs get a default number of queues but can use this message to request a
2730 * different number. If the request is successful, PF will reset the VF and
2731 * return 0. If unsuccessful, PF will send message informing VF of number of
2732 * available queues and return result of sending VF a message.
2733 **/
2734 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
2735 {
2736 struct virtchnl_vf_res_request *vfres =
2737 (struct virtchnl_vf_res_request *)msg;
2738 u16 req_pairs = vfres->num_queue_pairs;
2739 u8 cur_pairs = vf->num_queue_pairs;
2740 struct i40e_pf *pf = vf->pf;
2741
2742 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
2743 return -EINVAL;
2744
2745 if (req_pairs > I40E_MAX_VF_QUEUES) {
2746 dev_err(&pf->pdev->dev,
2747 "VF %d tried to request more than %d queues.\n",
2748 vf->vf_id,
2749 I40E_MAX_VF_QUEUES);
2750 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2751 } else if (req_pairs - cur_pairs > pf->queues_left) {
2752 dev_warn(&pf->pdev->dev,
2753 "VF %d requested %d more queues, but only %d left.\n",
2754 vf->vf_id,
2755 req_pairs - cur_pairs,
2756 pf->queues_left);
2757 vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2758 } else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
2759 dev_warn(&pf->pdev->dev,
2760 "VF %d requested %d more queues, but there is not enough for it.\n",
2761 vf->vf_id,
2762 req_pairs - cur_pairs);
2763 vfres->num_queue_pairs = cur_pairs;
2764 } else {
2765 /* successful request */
2766 vf->num_req_queues = req_pairs;
2767 i40e_vc_reset_vf(vf, true);
2768 return 0;
2769 }
2770
2771 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2772 (u8 *)vfres, sizeof(*vfres));
2773 }
2774
2775 /**
2776 * i40e_vc_get_stats_msg
2777 * @vf: pointer to the VF info
2778 * @msg: pointer to the msg buffer
2779 *
2780 * called from the VF to get vsi stats
2781 **/
2782 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
2783 {
2784 struct virtchnl_queue_select *vqs =
2785 (struct virtchnl_queue_select *)msg;
2786 struct i40e_pf *pf = vf->pf;
2787 struct i40e_eth_stats stats;
2788 int aq_ret = 0;
2789 struct i40e_vsi *vsi;
2790
2791 memset(&stats, 0, sizeof(struct i40e_eth_stats));
2792
2793 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2794 aq_ret = -EINVAL;
2795 goto error_param;
2796 }
2797
2798 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2799 aq_ret = -EINVAL;
2800 goto error_param;
2801 }
2802
2803 vsi = pf->vsi[vf->lan_vsi_idx];
2804 if (!vsi) {
2805 aq_ret = -EINVAL;
2806 goto error_param;
2807 }
2808 i40e_update_eth_stats(vsi);
2809 stats = vsi->eth_stats;
2810
2811 error_param:
2812 /* send the response back to the VF */
2813 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2814 (u8 *)&stats, sizeof(stats));
2815 }
2816
2817 #define I40E_MAX_MACVLAN_PER_HW 3072
2818 #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
2819 (num_ports))
2820 /* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
2821 * program. MAC filters: 16 for multicast, 1 for the MAC, 1 for broadcast
2822 */
2823 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
2824 #define I40E_VC_MAX_VLAN_PER_VF 16
2825
2826 #define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports) \
2827 ({ typeof(vf_num) vf_num_ = (vf_num); \
2828 typeof(num_ports) num_ports_ = (num_ports); \
2829 ((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ * \
2830 I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) + \
2831 I40E_VC_MAX_MAC_ADDR_PER_VF; })
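/* Worked example (editorial): on a 2-port device with 64 allocated VFs,
 * I40E_MAX_MACVLAN_PER_PF(2) = 3072 / 2 = 1536 filters per port.
 * Untrusted VFs are budgeted 64 * 18 = 1152 of those, so each trusted VF
 * may grow to ((1536 - 1152) / 64) + 18 = 24 MAC/VLAN filters before
 * i40e_check_vf_permission() below refuses further additions.
 */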
2832 /**
2833 * i40e_check_vf_permission
2834 * @vf: pointer to the VF info
2835 * @al: MAC address list from virtchnl
2836 *
2837 * Check that the given list of MAC addresses is allowed. Returns -EINVAL for
2838 * an invalid address and -EPERM for a denied one. Checks the following conditions:
2839 *
2840 * 1) broadcast and zero addresses are never valid
2841 * 2) unicast addresses are not allowed if the VMM has administratively set
2842 * the VF MAC address, unless the VF is marked as privileged.
2843 * 3) There is enough space to add all the addresses.
2844 *
2845 * Note that to guarantee consistency, it is expected this function be called
2846 * while holding the mac_filter_hash_lock, as otherwise the current number of
2847 * addresses might not be accurate.
2848 **/
2849 static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2850 struct virtchnl_ether_addr_list *al)
2851 {
2852 struct i40e_pf *pf = vf->pf;
2853 struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
2854 struct i40e_hw *hw = &pf->hw;
2855 int mac2add_cnt = 0;
2856 int i;
2857
2858 for (i = 0; i < al->num_elements; i++) {
2859 struct i40e_mac_filter *f;
2860 u8 *addr = al->list[i].addr;
2861
2862 if (is_broadcast_ether_addr(addr) ||
2863 is_zero_ether_addr(addr)) {
2864 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2865 addr);
2866 return -EINVAL;
2867 }
2868
2869 /* If the host VMM administrator has set the VF MAC address
2870 * administratively via the ndo_set_vf_mac command then deny
2871 * permission to the VF to add or delete unicast MAC addresses.
2872 * Unless the VF is privileged and then it can do whatever.
2873 * The VF may request to set the MAC address filter already
2874 * assigned to it so do not return an error in that case.
2875 */
2876 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2877 !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
2878 !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2879 dev_err(&pf->pdev->dev,
2880 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2881 return -EPERM;
2882 }
2883
2884 /* count the filters that will really be added */
2885 f = i40e_find_mac(vsi, addr);
2886 if (!f)
2887 ++mac2add_cnt;
2888 }
2889
2890 /* If this VF is not privileged, then we can't add more than a limited
2891 * number of addresses. Check to make sure that the additions do not
2892 * push us over the limit.
2893 */
2894 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2895 if ((i40e_count_filters(vsi) + mac2add_cnt) >
2896 I40E_VC_MAX_MAC_ADDR_PER_VF) {
2897 dev_err(&pf->pdev->dev,
2898 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2899 return -EPERM;
2900 }
2901 /* If this VF is trusted, it can use more resources than an untrusted one.
2902 * However, to ensure that every trusted VF gets an appropriate number of
2903 * resources, divide the whole pool of resources per port and then across
2904 * all VFs.
2905 */
2906 } else {
2907 if ((i40e_count_filters(vsi) + mac2add_cnt) >
2908 I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
2909 hw->num_ports)) {
2910 dev_err(&pf->pdev->dev,
2911 "Cannot add more MAC addresses, trusted VF exhausted it's resources\n");
2912 return -EPERM;
2913 }
2914 }
2915 return 0;
2916 }
2917
2918 /**
2919 * i40e_vc_ether_addr_type - get type of virtchnl_ether_addr
2920 * @vc_ether_addr: used to extract the type
2921 **/
2922 static u8
2923 i40e_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
2924 {
2925 return vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK;
2926 }
2927
2928 /**
2929 * i40e_is_vc_addr_legacy
2930 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
2931 *
2932 * check if the MAC address is from an older VF
2933 **/
2934 static bool
2935 i40e_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
2936 {
2937 return i40e_vc_ether_addr_type(vc_ether_addr) ==
2938 VIRTCHNL_ETHER_ADDR_LEGACY;
2939 }
2940
2941 /**
2942 * i40e_is_vc_addr_primary
2943 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
2944 *
2945 * check if the MAC address is the VF's primary MAC
2946 * This function should only be called when the MAC address in
2947 * virtchnl_ether_addr is a valid unicast MAC
2948 **/
2949 static bool
2950 i40e_is_vc_addr_primary(struct virtchnl_ether_addr *vc_ether_addr)
2951 {
2952 return i40e_vc_ether_addr_type(vc_ether_addr) ==
2953 VIRTCHNL_ETHER_ADDR_PRIMARY;
2954 }
2955
2956 /**
2957 * i40e_update_vf_mac_addr
2958 * @vf: VF to update
2959 * @vc_ether_addr: structure from VIRTCHNL with MAC to add
2960 *
2961 * update the VF's cached hardware MAC if allowed
2962 **/
2963 static void
2964 i40e_update_vf_mac_addr(struct i40e_vf *vf,
2965 struct virtchnl_ether_addr *vc_ether_addr)
2966 {
2967 u8 *mac_addr = vc_ether_addr->addr;
2968
2969 if (!is_valid_ether_addr(mac_addr))
2970 return;
2971
2972 /* If request to add MAC filter is a primary request update its default
2973 * MAC address with the requested one. If it is a legacy request then
2974 * check if current default is empty if so update the default MAC
2975 */
2976 if (i40e_is_vc_addr_primary(vc_ether_addr)) {
2977 ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
2978 } else if (i40e_is_vc_addr_legacy(vc_ether_addr)) {
2979 if (is_zero_ether_addr(vf->default_lan_addr.addr))
2980 ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
2981 }
2982 }
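/* Worked example (editorial): a VF sending 00:11:22:33:44:55 typed
 * VIRTCHNL_ETHER_ADDR_PRIMARY always becomes the new default_lan_addr,
 * while the same address typed VIRTCHNL_ETHER_ADDR_LEGACY only does so
 * when default_lan_addr is still all-zero. This preserves an
 * administratively set MAC against older VF drivers that cannot express
 * intent through the type field.
 */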
2983
2984 /**
2985 * i40e_vc_add_mac_addr_msg
2986 * @vf: pointer to the VF info
2987 * @msg: pointer to the msg buffer
2988 *
2989 * add guest mac address filter
2990 **/
2991 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2992 {
2993 struct virtchnl_ether_addr_list *al =
2994 (struct virtchnl_ether_addr_list *)msg;
2995 struct i40e_pf *pf = vf->pf;
2996 struct i40e_vsi *vsi = NULL;
2997 int ret = 0;
2998 int i;
2999
3000 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3001 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3002 ret = -EINVAL;
3003 goto error_param;
3004 }
3005
3006 vsi = pf->vsi[vf->lan_vsi_idx];
3007
3008 /* Lock once, because every function inside the for loop accesses the
3009 * VSI's MAC filter list, which must be protected by the same lock.
3010 */
3011 spin_lock_bh(&vsi->mac_filter_hash_lock);
3012
3013 ret = i40e_check_vf_permission(vf, al);
3014 if (ret) {
3015 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3016 goto error_param;
3017 }
3018
3019 /* add new addresses to the list */
3020 for (i = 0; i < al->num_elements; i++) {
3021 struct i40e_mac_filter *f;
3022
3023 f = i40e_find_mac(vsi, al->list[i].addr);
3024 if (!f) {
3025 f = i40e_add_mac_filter(vsi, al->list[i].addr);
3026
3027 if (!f) {
3028 dev_err(&pf->pdev->dev,
3029 "Unable to add MAC filter %pM for VF %d\n",
3030 al->list[i].addr, vf->vf_id);
3031 ret = -EINVAL;
3032 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3033 goto error_param;
3034 }
3035 }
3036 i40e_update_vf_mac_addr(vf, &al->list[i]);
3037 }
3038 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3039
3040 /* program the updated filter list */
3041 ret = i40e_sync_vsi_filters(vsi);
3042 if (ret)
3043 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
3044 vf->vf_id, ret);
3045
3046 error_param:
3047 /* send the response to the VF */
3048 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
3049 ret, NULL, 0);
3050 }
3051
3052 /**
3053 * i40e_vc_del_mac_addr_msg
3054 * @vf: pointer to the VF info
3055 * @msg: pointer to the msg buffer
3056 *
3057 * remove guest mac address filter
3058 **/
3059 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
3060 {
3061 struct virtchnl_ether_addr_list *al =
3062 (struct virtchnl_ether_addr_list *)msg;
3063 bool was_unimac_deleted = false;
3064 struct i40e_pf *pf = vf->pf;
3065 struct i40e_vsi *vsi = NULL;
3066 int ret = 0;
3067 int i;
3068
3069 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3070 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3071 ret = -EINVAL;
3072 goto error_param;
3073 }
3074
3075 for (i = 0; i < al->num_elements; i++) {
3076 if (is_broadcast_ether_addr(al->list[i].addr) ||
3077 is_zero_ether_addr(al->list[i].addr)) {
3078 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
3079 al->list[i].addr, vf->vf_id);
3080 ret = -EINVAL;
3081 goto error_param;
3082 }
3083 if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
3084 was_unimac_deleted = true;
3085 }
3086 vsi = pf->vsi[vf->lan_vsi_idx];
3087
3088 spin_lock_bh(&vsi->mac_filter_hash_lock);
3089 /* delete addresses from the list */
3090 for (i = 0; i < al->num_elements; i++)
3091 if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
3092 ret = -EINVAL;
3093 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3094 goto error_param;
3095 }
3096
3097 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3098
3099 if (was_unimac_deleted)
3100 eth_zero_addr(vf->default_lan_addr.addr);
3101
3102 /* program the updated filter list */
3103 ret = i40e_sync_vsi_filters(vsi);
3104 if (ret)
3105 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
3106 vf->vf_id, ret);
3107
3108 if (vf->trusted && was_unimac_deleted) {
3109 struct i40e_mac_filter *f;
3110 struct hlist_node *h;
3111 u8 *macaddr = NULL;
3112 int bkt;
3113
3114 /* set last unicast mac address as default */
3115 spin_lock_bh(&vsi->mac_filter_hash_lock);
3116 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
3117 if (is_valid_ether_addr(f->macaddr))
3118 macaddr = f->macaddr;
3119 }
3120 if (macaddr)
3121 ether_addr_copy(vf->default_lan_addr.addr, macaddr);
3122 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3123 }
3124 error_param:
3125 /* send the response to the VF */
3126 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
3127 }
3128
3129 /**
3130 * i40e_vc_add_vlan_msg
3131 * @vf: pointer to the VF info
3132 * @msg: pointer to the msg buffer
3133 *
3134 * program guest vlan id
3135 **/
3136 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
3137 {
3138 struct virtchnl_vlan_filter_list *vfl =
3139 (struct virtchnl_vlan_filter_list *)msg;
3140 struct i40e_pf *pf = vf->pf;
3141 struct i40e_vsi *vsi = NULL;
3142 int aq_ret = 0;
3143 int i;
3144
3145 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
3146 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3147 dev_err(&pf->pdev->dev,
3148 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
3149 goto error_param;
3150 }
3151 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3152 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3153 aq_ret = -EINVAL;
3154 goto error_param;
3155 }
3156
3157 for (i = 0; i < vfl->num_elements; i++) {
3158 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3159 aq_ret = -EINVAL;
3160 dev_err(&pf->pdev->dev,
3161 "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
3162 goto error_param;
3163 }
3164 }
3165 vsi = pf->vsi[vf->lan_vsi_idx];
3166 if (vsi->info.pvid) {
3167 aq_ret = -EINVAL;
3168 goto error_param;
3169 }
3170
3171 i40e_vlan_stripping_enable(vsi);
3172 for (i = 0; i < vfl->num_elements; i++) {
3173 /* add new VLAN filter */
3174 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
3175 if (!ret)
3176 vf->num_vlan++;
3177
3178 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
3179 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
3180 true,
3181 vfl->vlan_id[i],
3182 NULL);
3183 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
3184 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
3185 true,
3186 vfl->vlan_id[i],
3187 NULL);
3188
3189 if (ret)
3190 dev_err(&pf->pdev->dev,
3191 "Unable to add VLAN filter %d for VF %d, error %d\n",
3192 vfl->vlan_id[i], vf->vf_id, ret);
3193 }
3194
3195 error_param:
3196 /* send the response to the VF */
3197 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
3198 }
3199
3200 /**
3201 * i40e_vc_remove_vlan_msg
3202 * @vf: pointer to the VF info
3203 * @msg: pointer to the msg buffer
3204 *
3205 * remove programmed guest vlan id
3206 **/
3207 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
3208 {
3209 struct virtchnl_vlan_filter_list *vfl =
3210 (struct virtchnl_vlan_filter_list *)msg;
3211 struct i40e_pf *pf = vf->pf;
3212 struct i40e_vsi *vsi = NULL;
3213 int aq_ret = 0;
3214 int i;
3215
3216 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3217 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3218 aq_ret = -EINVAL;
3219 goto error_param;
3220 }
3221
3222 for (i = 0; i < vfl->num_elements; i++) {
3223 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3224 aq_ret = -EINVAL;
3225 goto error_param;
3226 }
3227 }
3228
3229 vsi = pf->vsi[vf->lan_vsi_idx];
3230 if (vsi->info.pvid) {
3231 if (vfl->num_elements > 1 || vfl->vlan_id[0])
3232 aq_ret = -EINVAL;
3233 goto error_param;
3234 }
3235
3236 for (i = 0; i < vfl->num_elements; i++) {
3237 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
3238 vf->num_vlan--;
3239
3240 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
3241 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
3242 false,
3243 vfl->vlan_id[i],
3244 NULL);
3245 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
3246 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
3247 false,
3248 vfl->vlan_id[i],
3249 NULL);
3250 }
3251
3252 error_param:
3253 /* send the response to the VF */
3254 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
3255 }
3256
3257 /**
3258 * i40e_vc_rdma_msg
3259 * @vf: pointer to the VF info
3260 * @msg: pointer to the msg buffer
3261 * @msglen: msg length
3262 *
3263 * called from the VF for RDMA (iWARP) messages
3264 **/
3265 static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
3266 {
3267 struct i40e_pf *pf = vf->pf;
3268 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
3269 int aq_ret = 0;
3270
3271 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3272 !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
3273 aq_ret = -EINVAL;
3274 goto error_param;
3275 }
3276
3277 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
3278 msg, msglen);
3279
3280 error_param:
3281 /* send the response to the VF */
3282 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_RDMA,
3283 aq_ret);
3284 }
3285
3286 /**
3287 * i40e_vc_rdma_qvmap_msg
3288 * @vf: pointer to the VF info
3289 * @msg: pointer to the msg buffer
3290 * @config: config qvmap or release it
3291 *
3292 * called from the VF for RDMA (iWARP) messages
3293 **/
3294 static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
3295 {
3296 struct virtchnl_rdma_qvlist_info *qvlist_info =
3297 (struct virtchnl_rdma_qvlist_info *)msg;
3298 int aq_ret = 0;
3299
3300 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3301 !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
3302 aq_ret = -EINVAL;
3303 goto error_param;
3304 }
3305
3306 if (config) {
3307 if (i40e_config_rdma_qvlist(vf, qvlist_info))
3308 aq_ret = -EINVAL;
3309 } else {
3310 i40e_release_rdma_qvlist(vf);
3311 }
3312
3313 error_param:
3314 /* send the response to the VF */
3315 return i40e_vc_send_resp_to_vf(vf,
3316 config ? VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP :
3317 VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP,
3318 aq_ret);
3319 }
3320
3321 /**
3322 * i40e_vc_config_rss_key
3323 * @vf: pointer to the VF info
3324 * @msg: pointer to the msg buffer
3325 *
3326 * Configure the VF's RSS key
3327 **/
3328 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
3329 {
3330 struct virtchnl_rss_key *vrk =
3331 (struct virtchnl_rss_key *)msg;
3332 struct i40e_pf *pf = vf->pf;
3333 struct i40e_vsi *vsi = NULL;
3334 int aq_ret = 0;
3335
3336 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3337 !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
3338 vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
3339 aq_ret = -EINVAL;
3340 goto err;
3341 }
3342
3343 vsi = pf->vsi[vf->lan_vsi_idx];
3344 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
3345 err:
3346 /* send the response to the VF */
3347 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
3348 aq_ret);
3349 }
3350
3351 /**
3352 * i40e_vc_config_rss_lut
3353 * @vf: pointer to the VF info
3354 * @msg: pointer to the msg buffer
3355 *
3356 * Configure the VF's RSS LUT
3357 **/
3358 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
3359 {
3360 struct virtchnl_rss_lut *vrl =
3361 (struct virtchnl_rss_lut *)msg;
3362 struct i40e_pf *pf = vf->pf;
3363 struct i40e_vsi *vsi = NULL;
3364 int aq_ret = 0;
3365 u16 i;
3366
3367 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3368 !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
3369 vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
3370 aq_ret = -EINVAL;
3371 goto err;
3372 }
3373
3374 for (i = 0; i < vrl->lut_entries; i++)
3375 if (vrl->lut[i] >= vf->num_queue_pairs) {
3376 aq_ret = -EINVAL;
3377 goto err;
3378 }
3379
3380 vsi = pf->vsi[vf->lan_vsi_idx];
3381 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
3382 err:
3383 /* send the response to the VF */
3384 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
3385 aq_ret);
3386 }
3387
3388 /**
3389 * i40e_vc_get_rss_hena
3390 * @vf: pointer to the VF info
3391 * @msg: pointer to the msg buffer
3392 *
3393 * Return the RSS HENA bits allowed by the hardware
3394 **/
3395 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
3396 {
3397 struct virtchnl_rss_hena *vrh = NULL;
3398 struct i40e_pf *pf = vf->pf;
3399 int aq_ret = 0;
3400 int len = 0;
3401
3402 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3403 aq_ret = -EINVAL;
3404 goto err;
3405 }
3406 len = sizeof(struct virtchnl_rss_hena);
3407
3408 vrh = kzalloc(len, GFP_KERNEL);
3409 if (!vrh) {
3410 aq_ret = -ENOMEM;
3411 len = 0;
3412 goto err;
3413 }
3414 vrh->hena = i40e_pf_get_default_rss_hena(pf);
3415 err:
3416 /* send the response back to the VF */
3417 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
3418 aq_ret, (u8 *)vrh, len);
3419 kfree(vrh);
3420 return aq_ret;
3421 }
3422
3423 /**
3424 * i40e_vc_set_rss_hena
3425 * @vf: pointer to the VF info
3426 * @msg: pointer to the msg buffer
3427 *
3428 * Set the RSS HENA bits for the VF
3429 **/
3430 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
3431 {
3432 struct virtchnl_rss_hena *vrh =
3433 (struct virtchnl_rss_hena *)msg;
3434 struct i40e_pf *pf = vf->pf;
3435 struct i40e_hw *hw = &pf->hw;
3436 int aq_ret = 0;
3437
3438 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3439 aq_ret = -EINVAL;
3440 goto err;
3441 }
3442 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
3443 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
3444 (u32)(vrh->hena >> 32));
3445
3446 err:
3447 /* send the response to the VF */
3448 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
3449 }
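/* Editorial note: the 64-bit HENA word is split across two 32-bit
 * register writes above. For example, vrh->hena = 0x0000000500000003ULL
 * lands as 0x00000003 in I40E_VFQF_HENA1(0, vf_id) (low word) and
 * 0x00000005 in I40E_VFQF_HENA1(1, vf_id) (high word); together the pair
 * forms the VF's bitmap of packet types enabled for RSS hashing.
 */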
3450
3451 /**
3452 * i40e_vc_enable_vlan_stripping
3453 * @vf: pointer to the VF info
3454 * @msg: pointer to the msg buffer
3455 *
3456 * Enable vlan header stripping for the VF
3457 **/
3458 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3459 {
3460 struct i40e_vsi *vsi;
3461 int aq_ret = 0;
3462
3463 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3464 aq_ret = -EINVAL;
3465 goto err;
3466 }
3467
3468 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3469 i40e_vlan_stripping_enable(vsi);
3470
3471 err:
3472 /* send the response to the VF */
3473 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3474 aq_ret);
3475 }
3476
3477 /**
3478 * i40e_vc_disable_vlan_stripping
3479 * @vf: pointer to the VF info
3480 * @msg: pointer to the msg buffer
3481 *
3482 * Disable vlan header stripping for the VF
3483 **/
3484 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3485 {
3486 struct i40e_vsi *vsi;
3487 int aq_ret = 0;
3488
3489 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3490 aq_ret = -EINVAL;
3491 goto err;
3492 }
3493
3494 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3495 i40e_vlan_stripping_disable(vsi);
3496
3497 err:
3498 /* send the response to the VF */
3499 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3500 aq_ret);
3501 }
3502
3503 /**
3504 * i40e_validate_cloud_filter
3505 * @vf: pointer to VF structure
3506 * @tc_filter: pointer to filter requested
3507 *
3508 * This function validates cloud filter programmed as TC filter for ADq
3509 **/
3510 static int i40e_validate_cloud_filter(struct i40e_vf *vf,
3511 struct virtchnl_filter *tc_filter)
3512 {
3513 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
3514 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
3515 struct i40e_pf *pf = vf->pf;
3516 struct i40e_vsi *vsi = NULL;
3517 struct i40e_mac_filter *f;
3518 struct hlist_node *h;
3519 bool found = false;
3520 int bkt;
3521
3522 if (!tc_filter->action) {
3523 dev_info(&pf->pdev->dev,
3524 "VF %d: Currently ADq doesn't support Drop Action\n",
3525 vf->vf_id);
3526 goto err;
3527 }
3528
3529 /* action_meta is the TC number to which the filter is applied */
3530 if (!tc_filter->action_meta ||
3531 tc_filter->action_meta > I40E_MAX_VF_VSI) {
3532 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3533 vf->vf_id, tc_filter->action_meta);
3534 goto err;
3535 }
3536
3537 /* Check whether the filter is programmed for advanced mode or basic mode.
3538 * There are two ADq modes (for VF only):
3539 * 1. Basic mode: intended to allow as many filter options as possible
3540 * to be added to a VF in non-trusted mode. The main goal is
3541 * filtering on the VF's own MAC and VLAN id.
3542 * 2. Advanced mode: allows filters to be applied to traffic other than
3543 * the VF's own MAC or VLAN. This mode requires the VF
3544 * to be trusted.
3545 */
3546 if (mask.dst_mac[0] && !mask.dst_ip[0]) {
3547 vsi = pf->vsi[vf->lan_vsi_idx];
3548 f = i40e_find_mac(vsi, data.dst_mac);
3549
3550 if (!f) {
3551 dev_info(&pf->pdev->dev,
3552 "Destination MAC %pM doesn't belong to VF %d\n",
3553 data.dst_mac, vf->vf_id);
3554 goto err;
3555 }
3556
3557 if (mask.vlan_id) {
3558 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
3559 hlist) {
3560 if (f->vlan == ntohs(data.vlan_id)) {
3561 found = true;
3562 break;
3563 }
3564 }
3565 if (!found) {
3566 dev_info(&pf->pdev->dev,
3567 "VF %d doesn't have any VLAN id %u\n",
3568 vf->vf_id, ntohs(data.vlan_id));
3569 goto err;
3570 }
3571 }
3572 } else {
3573 /* Check if VF is trusted */
3574 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3575 dev_err(&pf->pdev->dev,
3576 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3577 vf->vf_id);
3578 return -EIO;
3579 }
3580 }
3581
3582 if (mask.dst_mac[0] & data.dst_mac[0]) {
3583 if (is_broadcast_ether_addr(data.dst_mac) ||
3584 is_zero_ether_addr(data.dst_mac)) {
3585 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3586 vf->vf_id, data.dst_mac);
3587 goto err;
3588 }
3589 }
3590
3591 if (mask.src_mac[0] & data.src_mac[0]) {
3592 if (is_broadcast_ether_addr(data.src_mac) ||
3593 is_zero_ether_addr(data.src_mac)) {
3594 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3595 vf->vf_id, data.src_mac);
3596 goto err;
3597 }
3598 }
3599
3600 if (mask.dst_port & data.dst_port) {
3601 if (!data.dst_port) {
3602 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3603 vf->vf_id);
3604 goto err;
3605 }
3606 }
3607
3608 if (mask.src_port & data.src_port) {
3609 if (!data.src_port) {
3610 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3611 vf->vf_id);
3612 goto err;
3613 }
3614 }
3615
3616 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3617 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3618 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3619 vf->vf_id);
3620 goto err;
3621 }
3622
3623 if (mask.vlan_id & data.vlan_id) {
3624 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3625 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3626 vf->vf_id);
3627 goto err;
3628 }
3629 }
3630
3631 return 0;
3632 err:
3633 return -EIO;
3634 }
3635
3636 /**
3637 * i40e_find_vsi_from_seid - searches for the vsi with the given seid
3638 * @vf: pointer to the VF info
3639 * @seid: seid of the vsi it is searching for
3640 **/
3641 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3642 {
3643 struct i40e_pf *pf = vf->pf;
3644 struct i40e_vsi *vsi = NULL;
3645 int i;
3646
3647 for (i = 0; i < vf->num_tc; i++) {
3648 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3649 if (vsi && vsi->seid == seid)
3650 return vsi;
3651 }
3652 return NULL;
3653 }
3654
3655 /**
3656 * i40e_del_all_cloud_filters
3657 * @vf: pointer to the VF info
3658 *
3659 * This function deletes all cloud filters
3660 **/
3661 static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3662 {
3663 struct i40e_cloud_filter *cfilter = NULL;
3664 struct i40e_pf *pf = vf->pf;
3665 struct i40e_vsi *vsi = NULL;
3666 struct hlist_node *node;
3667 int ret;
3668
3669 hlist_for_each_entry_safe(cfilter, node,
3670 &vf->cloud_filter_list, cloud_node) {
3671 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3672
3673 if (!vsi) {
3674 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3675 vf->vf_id, cfilter->seid);
3676 continue;
3677 }
3678
3679 if (cfilter->dst_port)
3680 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3681 false);
3682 else
3683 ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3684 if (ret)
3685 dev_err(&pf->pdev->dev,
3686 "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
3687 vf->vf_id, ERR_PTR(ret),
3688 i40e_aq_str(&pf->hw,
3689 pf->hw.aq.asq_last_status));
3690
3691 hlist_del(&cfilter->cloud_node);
3692 kfree(cfilter);
3693 vf->num_cloud_filters--;
3694 }
3695 }
3696
3697 /**
3698 * i40e_vc_del_cloud_filter
3699 * @vf: pointer to the VF info
3700 * @msg: pointer to the msg buffer
3701 *
3702 * This function deletes a cloud filter programmed as TC filter for ADq
3703 **/
3704 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3705 {
3706 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3707 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3708 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3709 struct i40e_cloud_filter cfilter, *cf = NULL;
3710 struct i40e_pf *pf = vf->pf;
3711 struct i40e_vsi *vsi = NULL;
3712 struct hlist_node *node;
3713 int aq_ret = 0;
3714 int i, ret;
3715
3716 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3717 aq_ret = -EINVAL;
3718 goto err;
3719 }
3720
3721 if (!vf->adq_enabled) {
3722 dev_info(&pf->pdev->dev,
3723 "VF %d: ADq not enabled, can't apply cloud filter\n",
3724 vf->vf_id);
3725 aq_ret = -EINVAL;
3726 goto err;
3727 }
3728
3729 if (i40e_validate_cloud_filter(vf, vcf)) {
3730 dev_info(&pf->pdev->dev,
3731 "VF %d: Invalid input, can't apply cloud filter\n",
3732 vf->vf_id);
3733 aq_ret = -EINVAL;
3734 goto err;
3735 }
3736
3737 memset(&cfilter, 0, sizeof(cfilter));
3738 /* parse destination mac address */
3739 for (i = 0; i < ETH_ALEN; i++)
3740 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3741
3742 /* parse source mac address */
3743 for (i = 0; i < ETH_ALEN; i++)
3744 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3745
3746 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3747 cfilter.dst_port = mask.dst_port & tcf.dst_port;
3748 cfilter.src_port = mask.src_port & tcf.src_port;
3749
3750 switch (vcf->flow_type) {
3751 case VIRTCHNL_TCP_V4_FLOW:
3752 cfilter.n_proto = ETH_P_IP;
3753 if (mask.dst_ip[0] & tcf.dst_ip[0])
3754 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3755 ARRAY_SIZE(tcf.dst_ip));
3756 else if (mask.src_ip[0] & tcf.src_ip[0])
3757 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3758 ARRAY_SIZE(tcf.dst_ip));
3759 break;
3760 case VIRTCHNL_TCP_V6_FLOW:
3761 cfilter.n_proto = ETH_P_IPV6;
3762 if (mask.dst_ip[3] & tcf.dst_ip[3])
3763 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3764 sizeof(cfilter.ip.v6.dst_ip6));
3765 if (mask.src_ip[3] & tcf.src_ip[3])
3766 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3767 sizeof(cfilter.ip.v6.src_ip6));
3768 break;
3769 default:
3770 /* TC filter can be configured based on different combinations
3771 * and in this case IP is not a part of filter config
3772 */
3773 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3774 vf->vf_id);
3775 }
3776
3777 /* get the vsi to which the tc belongs to */
3778 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3779 cfilter.seid = vsi->seid;
3780 cfilter.flags = vcf->field_flags;
3781
3782 /* Deleting TC filter */
3783 if (tcf.dst_port)
3784 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3785 else
3786 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3787 if (ret) {
3788 dev_err(&pf->pdev->dev,
3789 "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
3790 vf->vf_id, ERR_PTR(ret),
3791 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3792 goto err;
3793 }
3794
3795 hlist_for_each_entry_safe(cf, node,
3796 &vf->cloud_filter_list, cloud_node) {
3797 if (cf->seid != cfilter.seid)
3798 continue;
3799 if (mask.dst_port)
3800 if (cfilter.dst_port != cf->dst_port)
3801 continue;
3802 if (mask.dst_mac[0])
3803 if (!ether_addr_equal(cf->dst_mac, cfilter.dst_mac))
3804 continue;
3805 /* for ipv4 data to be valid, only the first 32-bit word of the mask is set */
3806 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3807 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3808 ARRAY_SIZE(tcf.dst_ip)))
3809 continue;
3810 /* for ipv6, mask is set for all sixteen bytes (4 words) */
3811 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3812 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3813 sizeof(cfilter.ip.v6.src_ip6)))
3814 continue;
3815 if (mask.vlan_id)
3816 if (cfilter.vlan_id != cf->vlan_id)
3817 continue;
3818
3819 hlist_del(&cf->cloud_node);
3820 kfree(cf);
3821 vf->num_cloud_filters--;
3822 }
3823
3824 err:
3825 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3826 aq_ret);
3827 }
3828
3829 /**
3830 * i40e_vc_add_cloud_filter
3831 * @vf: pointer to the VF info
3832 * @msg: pointer to the msg buffer
3833 *
3834 * This function adds a cloud filter programmed as TC filter for ADq
3835 **/
3836 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3837 {
3838 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3839 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3840 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3841 struct i40e_cloud_filter *cfilter = NULL;
3842 struct i40e_pf *pf = vf->pf;
3843 struct i40e_vsi *vsi = NULL;
3844 int aq_ret = 0;
3845 int i, ret;
3846
3847 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3848 aq_ret = -EINVAL;
3849 goto err_out;
3850 }
3851
3852 if (!vf->adq_enabled) {
3853 dev_info(&pf->pdev->dev,
3854 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3855 vf->vf_id);
3856 aq_ret = -EINVAL;
3857 goto err_out;
3858 }
3859
3860 if (i40e_validate_cloud_filter(vf, vcf)) {
3861 dev_info(&pf->pdev->dev,
3862 "VF %d: Invalid input/s, can't apply cloud filter\n",
3863 vf->vf_id);
3864 aq_ret = -EINVAL;
3865 goto err_out;
3866 }
3867
3868 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3869 if (!cfilter)
3870 return -ENOMEM;
3871
3872 /* parse destination mac address */
3873 for (i = 0; i < ETH_ALEN; i++)
3874 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3875
3876 /* parse source mac address */
3877 for (i = 0; i < ETH_ALEN; i++)
3878 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3879
3880 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3881 cfilter->dst_port = mask.dst_port & tcf.dst_port;
3882 cfilter->src_port = mask.src_port & tcf.src_port;
3883
3884 switch (vcf->flow_type) {
3885 case VIRTCHNL_TCP_V4_FLOW:
3886 cfilter->n_proto = ETH_P_IP;
3887 if (mask.dst_ip[0] & tcf.dst_ip[0])
3888 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3889 sizeof(cfilter->ip.v4.dst_ip));
3890 else if (mask.src_ip[0] & tcf.src_ip[0])
3891 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3892 sizeof(cfilter->ip.v4.src_ip));
3893 break;
3894 case VIRTCHNL_TCP_V6_FLOW:
3895 cfilter->n_proto = ETH_P_IPV6;
3896 if (mask.dst_ip[3] & tcf.dst_ip[3])
3897 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3898 sizeof(cfilter->ip.v6.dst_ip6));
3899 if (mask.src_ip[3] & tcf.src_ip[3])
3900 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3901 sizeof(cfilter->ip.v6.src_ip6));
3902 break;
3903 default:
3904 /* a TC filter can match on several field combinations; for this
3905 * one the IP addresses are not part of the filter config
3906 */
3907 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3908 vf->vf_id);
3909 }
3910
3911 /* get the VSI to which the TC belongs */
3912 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3913 cfilter->seid = vsi->seid;
3914 cfilter->flags = vcf->field_flags;
3915
3916 /* Adding cloud filter programmed as TC filter */
3917 if (tcf.dst_port)
3918 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3919 else
3920 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3921 if (ret) {
3922 dev_err(&pf->pdev->dev,
3923 "VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
3924 vf->vf_id, ERR_PTR(ret),
3925 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3926 goto err_free;
3927 }
3928
3929 INIT_HLIST_NODE(&cfilter->cloud_node);
3930 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3931 /* ownership of cfilter passes to the list; clear the pointer so err_free won't free it */
3932 cfilter = NULL;
3933 vf->num_cloud_filters++;
3934 err_free:
3935 kfree(cfilter);
3936 err_out:
3937 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3938 aq_ret);
3939 }
3940
3941 /**
3942 * i40e_vc_add_qch_msg: Add queue channel and enable ADq
3943 * @vf: pointer to the VF info
3944 * @msg: pointer to the msg buffer
3945 **/
3946 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3947 {
3948 struct virtchnl_tc_info *tci =
3949 (struct virtchnl_tc_info *)msg;
3950 struct i40e_pf *pf = vf->pf;
3951 struct i40e_link_status *ls = &pf->hw.phy.link_info;
3952 int i, adq_request_qps = 0;
3953 int aq_ret = 0;
3954 u32 speed = 0; /* u32 so the SPEED_UNKNOWN comparison below can match */
3955
3956 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3957 aq_ret = -EINVAL;
3958 goto err;
3959 }
3960
3961 /* ADq cannot be applied if spoof check is ON */
3962 if (vf->spoofchk) {
3963 dev_err(&pf->pdev->dev,
3964 "Spoof check is ON, turn it OFF to enable ADq\n");
3965 aq_ret = -EINVAL;
3966 goto err;
3967 }
3968
3969 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3970 dev_err(&pf->pdev->dev,
3971 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
3972 vf->vf_id);
3973 aq_ret = -EINVAL;
3974 goto err;
3975 }
3976
3977 /* max number of traffic classes for VF currently capped at 4 */
3978 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
3979 dev_err(&pf->pdev->dev,
3980 "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
3981 vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
3982 aq_ret = -EINVAL;
3983 goto err;
3984 }
3985
3986 /* validate queues for each TC */
3987 for (i = 0; i < tci->num_tc; i++)
3988 if (!tci->list[i].count ||
3989 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
3990 dev_err(&pf->pdev->dev,
3991 "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
3992 vf->vf_id, i, tci->list[i].count,
3993 I40E_DEFAULT_QUEUES_PER_VF);
3994 aq_ret = -EINVAL;
3995 goto err;
3996 }
3997
3998 /* additional queues needed: max VF queues minus the default already allocated */
3999 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
4000
4001 if (pf->queues_left < adq_request_qps) {
4002 dev_err(&pf->pdev->dev,
4003 "No queues left to allocate to VF %d\n",
4004 vf->vf_id);
4005 aq_ret = -EINVAL;
4006 goto err;
4007 }
4008
4009 /* we need to allocate max VF queues to enable ADq so as to
4010 * make sure an ADq-enabled VF always gets its queues back when
4011 * it goes through a reset.
4012 */
4013 vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
4014
4015 /* get link speed in Mbps to validate rate limit */
4016 speed = i40e_vc_link_speed2mbps(ls->link_speed);
4017 if (speed == SPEED_UNKNOWN) {
4018 dev_err(&pf->pdev->dev,
4019 "Cannot detect link speed\n");
4020 aq_ret = -EINVAL;
4021 goto err;
4022 }
4023
4024 /* parse data from the queue channel info */
4025 vf->num_tc = tci->num_tc;
4026 for (i = 0; i < vf->num_tc; i++) {
4027 if (tci->list[i].max_tx_rate) {
4028 if (tci->list[i].max_tx_rate > speed) {
4029 dev_err(&pf->pdev->dev,
4030 "Invalid max tx rate %llu specified for VF %d.",
4031 tci->list[i].max_tx_rate,
4032 vf->vf_id);
4033 aq_ret = -EINVAL;
4034 goto err;
4035 }
4036 vf->ch[i].max_tx_rate =
4037 tci->list[i].max_tx_rate;
4039 }
4040 vf->ch[i].num_qps = tci->list[i].count;
4041 }
4042
4043 /* set this flag only after making sure all inputs are sane */
4044 vf->adq_enabled = true;
4045
4046 /* reset the VF in order to allocate resources */
4047 i40e_vc_reset_vf(vf, true);
4048
4049 return 0;
4050
4051 /* send the response to the VF */
4052 err:
4053 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
4054 aq_ret);
4055 }
4056
4057 /**
4058 * i40e_vc_del_qch_msg
4059 * @vf: pointer to the VF info
4060 * @msg: pointer to the msg buffer
4061 **/
4062 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
4063 {
4064 struct i40e_pf *pf = vf->pf;
4065 int aq_ret = 0;
4066
4067 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
4068 aq_ret = -EINVAL;
4069 goto err;
4070 }
4071
4072 if (vf->adq_enabled) {
4073 i40e_del_all_cloud_filters(vf);
4074 i40e_del_qch(vf);
4075 vf->adq_enabled = false;
4076 vf->num_tc = 0;
4077 dev_info(&pf->pdev->dev,
4078 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
4079 vf->vf_id);
4080 } else {
4081 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
4082 vf->vf_id);
4083 aq_ret = -EINVAL;
4084 goto err;
4085 }
4085
4086 /* reset the VF in order to allocate resources */
4087 i40e_vc_reset_vf(vf, true);
4088
4089 return 0;
4090
4091 err:
4092 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
4093 aq_ret);
4094 }
4095
4096 /**
4097 * i40e_vc_process_vf_msg
4098 * @pf: pointer to the PF structure
4099 * @vf_id: source VF id
4100 * @v_opcode: operation code
4101 * @v_retval: unused return value code
4102 * @msg: pointer to the msg buffer
4103 * @msglen: msg length
4104 *
4105 * called from the common aeq/arq handler to
4106 * process a request from a VF
4107 **/
4108 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
4109 u32 __always_unused v_retval, u8 *msg, u16 msglen)
4110 {
4111 struct i40e_hw *hw = &pf->hw;
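/* VF ids on the admin queue are absolute; convert to this PF's local index */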
4112 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
4113 struct i40e_vf *vf;
4114 int ret;
4115
4116 pf->vf_aq_requests++;
4117 if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
4118 return -EINVAL;
4119 vf = &(pf->vf[local_vf_id]);
4120
4121 /* Check if VF is disabled. */
4122 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
4123 return -EINVAL;
4124
4125 /* perform basic checks on the msg */
4126 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
4127
4128 if (ret) {
4129 i40e_vc_send_resp_to_vf(vf, v_opcode, -EINVAL);
4130 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
4131 local_vf_id, v_opcode, msglen);
4132 return ret;
4133 }
4134
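/* dispatch to the opcode-specific handler; each handler sends its own
 * reply back to the VF over the admin queue
 */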
4135 switch (v_opcode) {
4136 case VIRTCHNL_OP_VERSION:
4137 ret = i40e_vc_get_version_msg(vf, msg);
4138 break;
4139 case VIRTCHNL_OP_GET_VF_RESOURCES:
4140 ret = i40e_vc_get_vf_resources_msg(vf, msg);
4141 i40e_vc_notify_vf_link_state(vf);
4142 break;
4143 case VIRTCHNL_OP_RESET_VF:
4144 i40e_vc_reset_vf(vf, false);
4145 ret = 0;
4146 break;
4147 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
4148 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
4149 break;
4150 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
4151 ret = i40e_vc_config_queues_msg(vf, msg);
4152 break;
4153 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
4154 ret = i40e_vc_config_irq_map_msg(vf, msg);
4155 break;
4156 case VIRTCHNL_OP_ENABLE_QUEUES:
4157 ret = i40e_vc_enable_queues_msg(vf, msg);
4158 i40e_vc_notify_vf_link_state(vf);
4159 break;
4160 case VIRTCHNL_OP_DISABLE_QUEUES:
4161 ret = i40e_vc_disable_queues_msg(vf, msg);
4162 break;
4163 case VIRTCHNL_OP_ADD_ETH_ADDR:
4164 ret = i40e_vc_add_mac_addr_msg(vf, msg);
4165 break;
4166 case VIRTCHNL_OP_DEL_ETH_ADDR:
4167 ret = i40e_vc_del_mac_addr_msg(vf, msg);
4168 break;
4169 case VIRTCHNL_OP_ADD_VLAN:
4170 ret = i40e_vc_add_vlan_msg(vf, msg);
4171 break;
4172 case VIRTCHNL_OP_DEL_VLAN:
4173 ret = i40e_vc_remove_vlan_msg(vf, msg);
4174 break;
4175 case VIRTCHNL_OP_GET_STATS:
4176 ret = i40e_vc_get_stats_msg(vf, msg);
4177 break;
4178 case VIRTCHNL_OP_RDMA:
4179 ret = i40e_vc_rdma_msg(vf, msg, msglen);
4180 break;
4181 case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
4182 ret = i40e_vc_rdma_qvmap_msg(vf, msg, true);
4183 break;
4184 case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
4185 ret = i40e_vc_rdma_qvmap_msg(vf, msg, false);
4186 break;
4187 case VIRTCHNL_OP_CONFIG_RSS_KEY:
4188 ret = i40e_vc_config_rss_key(vf, msg);
4189 break;
4190 case VIRTCHNL_OP_CONFIG_RSS_LUT:
4191 ret = i40e_vc_config_rss_lut(vf, msg);
4192 break;
4193 case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
4194 ret = i40e_vc_get_rss_hena(vf, msg);
4195 break;
4196 case VIRTCHNL_OP_SET_RSS_HENA:
4197 ret = i40e_vc_set_rss_hena(vf, msg);
4198 break;
4199 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
4200 ret = i40e_vc_enable_vlan_stripping(vf, msg);
4201 break;
4202 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
4203 ret = i40e_vc_disable_vlan_stripping(vf, msg);
4204 break;
4205 case VIRTCHNL_OP_REQUEST_QUEUES:
4206 ret = i40e_vc_request_queues_msg(vf, msg);
4207 break;
4208 case VIRTCHNL_OP_ENABLE_CHANNELS:
4209 ret = i40e_vc_add_qch_msg(vf, msg);
4210 break;
4211 case VIRTCHNL_OP_DISABLE_CHANNELS:
4212 ret = i40e_vc_del_qch_msg(vf, msg);
4213 break;
4214 case VIRTCHNL_OP_ADD_CLOUD_FILTER:
4215 ret = i40e_vc_add_cloud_filter(vf, msg);
4216 break;
4217 case VIRTCHNL_OP_DEL_CLOUD_FILTER:
4218 ret = i40e_vc_del_cloud_filter(vf, msg);
4219 break;
4220 case VIRTCHNL_OP_UNKNOWN:
4221 default:
4222 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
4223 v_opcode, local_vf_id);
4224 ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
4225 -EOPNOTSUPP);
4226 break;
4227 }
4228
4229 return ret;
4230 }
4231
4232 /**
4233 * i40e_vc_process_vflr_event
4234 * @pf: pointer to the PF structure
4235 *
4236 * called from the VFLR irq handler to
4237 * free up VF resources and state variables
4238 **/
4239 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
4240 {
4241 struct i40e_hw *hw = &pf->hw;
4242 u32 reg, reg_idx, bit_idx;
4243 struct i40e_vf *vf;
4244 int vf_id;
4245
4246 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
4247 return 0;
4248
4249 /* Re-enable the VFLR interrupt cause here, before looking for which
4250 * VF got reset. Otherwise, if another VF gets a reset while the
4251 * first one is being processed, that interrupt will be lost, and
4252 * that VF will be stuck in reset forever.
4253 */
4254 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4255 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
4256 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4257 i40e_flush(hw);
4258
4259 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4260 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
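/* each GLGEN_VFLRSTAT register tracks the reset status of 32 VFs */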
4261 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
4262 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
4263 /* read GLGEN_VFLRSTAT register to find out the flr VFs */
4264 vf = &pf->vf[vf_id];
4265 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
4266 if (reg & BIT(bit_idx))
4267 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
4268 i40e_reset_vf(vf, true);
4269 }
4270
4271 return 0;
4272 }
4273
4274 /**
4275 * i40e_validate_vf
4276 * @pf: the physical function
4277 * @vf_id: VF identifier
4278 *
4279 * Check that the VF is enabled and the VSI exists.
4280 *
4281 * Returns 0 on success, negative on failure
4282 **/
4283 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
4284 {
4285 struct i40e_vsi *vsi;
4286 struct i40e_vf *vf;
4287 int ret = 0;
4288
4289 if (vf_id >= pf->num_alloc_vfs) {
4290 dev_err(&pf->pdev->dev,
4291 "Invalid VF Identifier %d\n", vf_id);
4292 ret = -EINVAL;
4293 goto err_out;
4294 }
4295 vf = &pf->vf[vf_id];
4296 vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
4297 if (!vsi)
4298 ret = -EINVAL;
4299 err_out:
4300 return ret;
4301 }
4302
4303 /**
4304 * i40e_check_vf_init_timeout
4305 * @vf: the virtual function
4306 *
4307 * Check that the VF's initialization has completed and, if not, wait
4308 * up to 300ms for it to finish.
4309 *
4310 * Returns true when VF is initialized, false on timeout
4311 **/
4312 static bool i40e_check_vf_init_timeout(struct i40e_vf *vf)
4313 {
4314 int i;
4315
4316 /* When the VF is resetting wait until it is done.
4317 * It can take up to 200 milliseconds, but wait for
4318 * up to 300 milliseconds to be safe.
4319 */
4320 for (i = 0; i < 15; i++) {
4321 if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
4322 return true;
4323 msleep(20);
4324 }
4325
4326 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4327 dev_err(&vf->pf->pdev->dev,
4328 "VF %d still in reset. Try again.\n", vf->vf_id);
4329 return false;
4330 }
4331
4332 return true;
4333 }
4334
4335 /**
4336 * i40e_ndo_set_vf_mac
4337 * @netdev: network interface device structure
4338 * @vf_id: VF identifier
4339 * @mac: mac address
4340 *
4341 * program VF mac address
4342 **/
4343 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
4344 {
4345 struct i40e_netdev_priv *np = netdev_priv(netdev);
4346 struct i40e_vsi *vsi = np->vsi;
4347 struct i40e_pf *pf = vsi->back;
4348 struct i40e_mac_filter *f;
4349 struct i40e_vf *vf;
4350 int ret = 0;
4351 struct hlist_node *h;
4352 int bkt;
4353
4354 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4355 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4356 return -EAGAIN;
4357 }
4358
4359 /* validate the request */
4360 ret = i40e_validate_vf(pf, vf_id);
4361 if (ret)
4362 goto error_param;
4363
4364 vf = &pf->vf[vf_id];
4365 if (!i40e_check_vf_init_timeout(vf)) {
4366 ret = -EAGAIN;
4367 goto error_param;
4368 }
4369 vsi = pf->vsi[vf->lan_vsi_idx];
4370
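/* reject multicast addresses; an all-zero address is accepted and
 * clears the PF-set MAC below
 */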
4371 if (is_multicast_ether_addr(mac)) {
4372 dev_err(&pf->pdev->dev,
4373 "Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
4374 ret = -EINVAL;
4375 goto error_param;
4376 }
4377
4378 /* Lock once because below invoked function add/del_filter requires
4379 * mac_filter_hash_lock to be held
4380 */
4381 spin_lock_bh(&vsi->mac_filter_hash_lock);
4382
4383 /* delete the temporary mac address */
4384 if (!is_zero_ether_addr(vf->default_lan_addr.addr))
4385 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
4386
4387 /* Delete all the filters for this VSI - we're going to kill it
4388 * anyway.
4389 */
4390 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
4391 __i40e_del_filter(vsi, f);
4392
4393 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4394
4395 /* program mac filter */
4396 if (i40e_sync_vsi_filters(vsi)) {
4397 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
4398 ret = -EIO;
4399 goto error_param;
4400 }
4401 ether_addr_copy(vf->default_lan_addr.addr, mac);
4402
4403 if (is_zero_ether_addr(mac)) {
4404 vf->pf_set_mac = false;
4405 dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
4406 } else {
4407 vf->pf_set_mac = true;
4408 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
4409 mac, vf_id);
4410 }
4411
4412 /* Force the VF interface down so it comes back up with the new
4413 * MAC address
4414 */
4415 i40e_vc_reset_vf(vf, true);
4416 dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
4417
4418 error_param:
4419 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4420 return ret;
4421 }
4422
4423 /**
4424 * i40e_ndo_set_vf_port_vlan
4425 * @netdev: network interface device structure
4426 * @vf_id: VF identifier
4427 * @vlan_id: VLAN ID
4428 * @qos: priority setting
4429 * @vlan_proto: vlan protocol
4430 *
4431 * program VF vlan id and/or qos
4432 **/
4433 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
4434 u16 vlan_id, u8 qos, __be16 vlan_proto)
4435 {
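/* build the port VLAN tag: VLAN id in the low 12 bits, QoS priority
 * above it, per the 802.1Q TCI layout
 */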
4436 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
4437 struct i40e_netdev_priv *np = netdev_priv(netdev);
4438 bool allmulti = false, alluni = false;
4439 struct i40e_pf *pf = np->vsi->back;
4440 struct i40e_vsi *vsi;
4441 struct i40e_vf *vf;
4442 int ret = 0;
4443
4444 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4445 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4446 return -EAGAIN;
4447 }
4448
4449 /* validate the request */
4450 ret = i40e_validate_vf(pf, vf_id);
4451 if (ret)
4452 goto error_pvid;
4453
4454 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
4455 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
4456 ret = -EINVAL;
4457 goto error_pvid;
4458 }
4459
4460 if (vlan_proto != htons(ETH_P_8021Q)) {
4461 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
4462 ret = -EPROTONOSUPPORT;
4463 goto error_pvid;
4464 }
4465
4466 vf = &pf->vf[vf_id];
4467 if (!i40e_check_vf_init_timeout(vf)) {
4468 ret = -EAGAIN;
4469 goto error_pvid;
4470 }
4471 vsi = pf->vsi[vf->lan_vsi_idx];
4472
4473 if (le16_to_cpu(vsi->info.pvid) == vlanprio)
4474 /* duplicate request, so just return success */
4475 goto error_pvid;
4476
4477 i40e_vlan_stripping_enable(vsi);
4478 i40e_vc_reset_vf(vf, true);
4479 /* During reset the VF got a new VSI, so refresh a pointer. */
4480 vsi = pf->vsi[vf->lan_vsi_idx];
4481 /* Locked once because multiple functions below iterate list */
4482 spin_lock_bh(&vsi->mac_filter_hash_lock);
4483
4484 /* Check for condition where there was already a port VLAN ID
4485 * filter set and now it is being deleted by setting it to zero.
4486 * Additionally check for the condition where there was a port
4487 * VLAN but now there is a new and different port VLAN being set.
4488 * Before deleting all the old VLAN filters we must add new ones
4489 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
4490 * MAC addresses deleted.
4491 */
4492 if ((!(vlan_id || qos) ||
4493 vlanprio != le16_to_cpu(vsi->info.pvid)) &&
4494 vsi->info.pvid) {
4495 ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
4496 if (ret) {
4497 dev_info(&vsi->back->pdev->dev,
4498 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4499 vsi->back->hw.aq.asq_last_status);
4500 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4501 goto error_pvid;
4502 }
4503 }
4504
4505 if (vsi->info.pvid) {
4506 /* remove all filters on the old VLAN */
4507 i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
4508 VLAN_VID_MASK));
4509 }
4510
4511 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4512
4513 /* disable promisc modes in case they were enabled */
4514 ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
4515 allmulti, alluni);
4516 if (ret) {
4517 dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4518 goto error_pvid;
4519 }
4520
4521 if (vlan_id || qos)
4522 ret = i40e_vsi_add_pvid(vsi, vlanprio);
4523 else
4524 i40e_vsi_remove_pvid(vsi);
4525 spin_lock_bh(&vsi->mac_filter_hash_lock);
4526
4527 if (vlan_id) {
4528 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
4529 vlan_id, qos, vf_id);
4530
4531 /* add new VLAN filter for each MAC */
4532 ret = i40e_add_vlan_all_mac(vsi, vlan_id);
4533 if (ret) {
4534 dev_info(&vsi->back->pdev->dev,
4535 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4536 vsi->back->hw.aq.asq_last_status);
4537 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4538 goto error_pvid;
4539 }
4540
4541 /* remove the previously added non-VLAN MAC filters */
4542 i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
4543 }
4544
4545 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4546
4547 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
4548 alluni = true;
4549
4550 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
4551 allmulti = true;
4552
4553 /* Schedule the worker thread to take care of applying changes */
4554 i40e_service_event_schedule(vsi->back);
4555
4556 if (ret) {
4557 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4558 goto error_pvid;
4559 }
4560
4561 /* The Port VLAN needs to be saved across resets the same as the
4562 * default LAN MAC address.
4563 */
4564 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4565
4566 ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
4567 if (ret) {
4568 dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4569 goto error_pvid;
4570 }
4571
4572 ret = 0;
4573
4574 error_pvid:
4575 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4576 return ret;
4577 }
4578
4579 /**
4580 * i40e_ndo_set_vf_bw
4581 * @netdev: network interface device structure
4582 * @vf_id: VF identifier
4583 * @min_tx_rate: Minimum Tx rate
4584 * @max_tx_rate: Maximum Tx rate
4585 *
4586 * configure VF Tx rate
4587 **/
4588 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4589 int max_tx_rate)
4590 {
4591 struct i40e_netdev_priv *np = netdev_priv(netdev);
4592 struct i40e_pf *pf = np->vsi->back;
4593 struct i40e_vsi *vsi;
4594 struct i40e_vf *vf;
4595 int ret = 0;
4596
4597 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4598 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4599 return -EAGAIN;
4600 }
4601
4602 /* validate the request */
4603 ret = i40e_validate_vf(pf, vf_id);
4604 if (ret)
4605 goto error;
4606
4607 if (min_tx_rate) {
4608 dev_err(&pf->pdev->dev, "Invalid min tx rate %d specified for VF %d; min tx rate is not supported\n",
4609 min_tx_rate, vf_id);
4610 ret = -EINVAL;
4611 goto error;
4612 }
4613
4614 vf = &pf->vf[vf_id];
4615 if (!i40e_check_vf_init_timeout(vf)) {
4616 ret = -EAGAIN;
4617 goto error;
4618 }
4619 vsi = pf->vsi[vf->lan_vsi_idx];
4620
4621 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4622 if (ret)
4623 goto error;
4624
4625 vf->tx_rate = max_tx_rate;
4626 error:
4627 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4628 return ret;
4629 }
4630
4631 /**
4632 * i40e_ndo_get_vf_config
4633 * @netdev: network interface device structure
4634 * @vf_id: VF identifier
4635 * @ivi: VF configuration structure
4636 *
4637 * return VF configuration
4638 **/
4639 int i40e_ndo_get_vf_config(struct net_device *netdev,
4640 int vf_id, struct ifla_vf_info *ivi)
4641 {
4642 struct i40e_netdev_priv *np = netdev_priv(netdev);
4643 struct i40e_vsi *vsi = np->vsi;
4644 struct i40e_pf *pf = vsi->back;
4645 struct i40e_vf *vf;
4646 int ret = 0;
4647
4648 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4649 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4650 return -EAGAIN;
4651 }
4652
4653 /* validate the request */
4654 ret = i40e_validate_vf(pf, vf_id);
4655 if (ret)
4656 goto error_param;
4657
4658 vf = &pf->vf[vf_id];
4659 /* the first VSI is always the LAN VSI */
4660 vsi = pf->vsi[vf->lan_vsi_idx];
4661 if (!vsi) {
4662 ret = -ENOENT;
4663 goto error_param;
4664 }
4665
4666 ivi->vf = vf_id;
4667
4668 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4669
4670 ivi->max_tx_rate = vf->tx_rate;
4671 ivi->min_tx_rate = 0;
4672 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
4673 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
4674 I40E_VLAN_PRIORITY_SHIFT;
4675 if (!vf->link_forced)
4676 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4677 else if (vf->link_up)
4678 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4679 else
4680 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4681 ivi->spoofchk = vf->spoofchk;
4682 ivi->trusted = vf->trusted;
4683 ret = 0;
4684
4685 error_param:
4686 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4687 return ret;
4688 }
4689
4690 /**
4691 * i40e_ndo_set_vf_link_state
4692 * @netdev: network interface device structure
4693 * @vf_id: VF identifier
4694 * @link: required link state
4695 *
4696 * Set the link state of a specified VF, regardless of physical link state
4697 **/
4698 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
4699 {
4700 struct i40e_netdev_priv *np = netdev_priv(netdev);
4701 struct i40e_pf *pf = np->vsi->back;
4702 struct i40e_link_status *ls = &pf->hw.phy.link_info;
4703 struct virtchnl_pf_event pfe;
4704 struct i40e_hw *hw = &pf->hw;
4705 struct i40e_vf *vf;
4706 int abs_vf_id;
4707 int ret = 0;
4708
4709 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4710 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4711 return -EAGAIN;
4712 }
4713
4714 /* validate the request */
4715 if (vf_id >= pf->num_alloc_vfs) {
4716 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4717 ret = -EINVAL;
4718 goto error_out;
4719 }
4720
4721 vf = &pf->vf[vf_id];
4722 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
4723
4724 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
4725 pfe.severity = PF_EVENT_SEVERITY_INFO;
4726
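/* build the link event for the requested state: AUTO follows the
 * physical link, ENABLE/DISABLE force it up or down
 */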
4727 switch (link) {
4728 case IFLA_VF_LINK_STATE_AUTO:
4729 vf->link_forced = false;
4730 i40e_set_vf_link_state(vf, &pfe, ls);
4731 break;
4732 case IFLA_VF_LINK_STATE_ENABLE:
4733 vf->link_forced = true;
4734 vf->link_up = true;
4735 i40e_set_vf_link_state(vf, &pfe, ls);
4736 break;
4737 case IFLA_VF_LINK_STATE_DISABLE:
4738 vf->link_forced = true;
4739 vf->link_up = false;
4740 i40e_set_vf_link_state(vf, &pfe, ls);
4741 break;
4742 default:
4743 ret = -EINVAL;
4744 goto error_out;
4745 }
4746 /* Notify the VF of its new link state */
4747 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
4748 0, (u8 *)&pfe, sizeof(pfe), NULL);
4749
4750 error_out:
4751 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4752 return ret;
4753 }
4754
4755 /**
4756 * i40e_ndo_set_vf_spoofchk
4757 * @netdev: network interface device structure
4758 * @vf_id: VF identifier
4759 * @enable: flag to enable or disable feature
4760 *
4761 * Enable or disable VF spoof checking
4762 **/
4763 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
4764 {
4765 struct i40e_netdev_priv *np = netdev_priv(netdev);
4766 struct i40e_vsi *vsi = np->vsi;
4767 struct i40e_pf *pf = vsi->back;
4768 struct i40e_vsi_context ctxt;
4769 struct i40e_hw *hw = &pf->hw;
4770 struct i40e_vf *vf;
4771 int ret = 0;
4772
4773 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4774 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4775 return -EAGAIN;
4776 }
4777
4778 /* validate the request */
4779 if (vf_id >= pf->num_alloc_vfs) {
4780 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4781 ret = -EINVAL;
4782 goto out;
4783 }
4784
4785 vf = &(pf->vf[vf_id]);
4786 if (!i40e_check_vf_init_timeout(vf)) {
4787 ret = -EAGAIN;
4788 goto out;
4789 }
4790
4791 if (enable == vf->spoofchk)
4792 goto out;
4793
4794 vf->spoofchk = enable;
4795 memset(&ctxt, 0, sizeof(ctxt));
4796 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
4797 ctxt.pf_num = pf->hw.pf_id;
4798 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
4799 if (enable)
4800 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
4801 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
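/* when disabling, sec_flags stays zeroed, which turns both checks off */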
4802 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4803 if (ret) {
4804 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
4805 ret);
4806 ret = -EIO;
4807 }
4808 out:
4809 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4810 return ret;
4811 }
4812
4813 /**
4814 * i40e_ndo_set_vf_trust
4815 * @netdev: network interface device structure of the pf
4816 * @vf_id: VF identifier
4817 * @setting: trust setting
4818 *
4819 * Enable or disable VF trust setting
4820 **/
4821 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
4822 {
4823 struct i40e_netdev_priv *np = netdev_priv(netdev);
4824 struct i40e_pf *pf = np->vsi->back;
4825 struct i40e_vf *vf;
4826 int ret = 0;
4827
4828 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4829 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4830 return -EAGAIN;
4831 }
4832
4833 /* validate the request */
4834 if (vf_id >= pf->num_alloc_vfs) {
4835 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4836 ret = -EINVAL;
4837 goto out;
4838 }
4839
4840 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4841 dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
4842 ret = -EINVAL;
4843 goto out;
4844 }
4845
4846 vf = &pf->vf[vf_id];
4847
4848 if (setting == vf->trusted)
4849 goto out;
4850
4851 vf->trusted = setting;
4852
4853 /* request PF to sync mac/vlan filters for the VF */
4854 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
4855 pf->vsi[vf->lan_vsi_idx]->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
4856
4857 i40e_vc_reset_vf(vf, true);
4858 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
4859 vf_id, setting ? "" : "un");
4860
4861 if (vf->adq_enabled) {
4862 if (!vf->trusted) {
4863 dev_info(&pf->pdev->dev,
4864 "VF %u no longer Trusted, deleting all cloud filters\n",
4865 vf_id);
4866 i40e_del_all_cloud_filters(vf);
4867 }
4868 }
4869
4870 out:
4871 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4872 return ret;
4873 }
4874
4875 /**
4876 * i40e_get_vf_stats - populate some stats for the VF
4877 * @netdev: the netdev of the PF
4878 * @vf_id: the host OS identifier (0-127)
4879 * @vf_stats: pointer to the OS memory to be initialized
4880 */
4881 int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
4882 struct ifla_vf_stats *vf_stats)
4883 {
4884 struct i40e_netdev_priv *np = netdev_priv(netdev);
4885 struct i40e_pf *pf = np->vsi->back;
4886 struct i40e_eth_stats *stats;
4887 struct i40e_vsi *vsi;
4888 struct i40e_vf *vf;
4889
4890 /* validate the request */
4891 if (i40e_validate_vf(pf, vf_id))
4892 return -EINVAL;
4893
4894 vf = &pf->vf[vf_id];
4895 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4896 dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
4897 return -EBUSY;
4898 }
4899
4900 vsi = pf->vsi[vf->lan_vsi_idx];
4901 if (!vsi)
4902 return -EINVAL;
4903
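/* refresh the VSI's counters from HW before snapshotting them */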
4904 i40e_update_eth_stats(vsi);
4905 stats = &vsi->eth_stats;
4906
4907 memset(vf_stats, 0, sizeof(*vf_stats));
4908
4909 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4910 stats->rx_multicast;
4911 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4912 stats->tx_multicast;
4913 vf_stats->rx_bytes = stats->rx_bytes;
4914 vf_stats->tx_bytes = stats->tx_bytes;
4915 vf_stats->broadcast = stats->rx_broadcast;
4916 vf_stats->multicast = stats->rx_multicast;
4917 vf_stats->rx_dropped = stats->rx_discards;
4918 vf_stats->tx_dropped = stats->tx_discards;
4919
4920 return 0;
4921 }