// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 int v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
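		/* vf_id is PF-relative; the admin queue addresses VFs by
		 * their absolute ID, hence the func_caps.vf_base_id offset.
		 */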
		/* Not all vfs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}

/**
 * i40e_vc_link_speed2mbps
 * converts i40e_aq_link_speed to integer value of Mbps
 * @link_speed: the speed to convert
 *
 * return the speed as direct value of Mbps.
 **/
static u32
i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)
{
	switch (link_speed) {
	case I40E_LINK_SPEED_100MB:
		return SPEED_100;
	case I40E_LINK_SPEED_1GB:
		return SPEED_1000;
	case I40E_LINK_SPEED_2_5GB:
		return SPEED_2500;
	case I40E_LINK_SPEED_5GB:
		return SPEED_5000;
	case I40E_LINK_SPEED_10GB:
		return SPEED_10000;
	case I40E_LINK_SPEED_20GB:
		return SPEED_20000;
	case I40E_LINK_SPEED_25GB:
		return SPEED_25000;
	case I40E_LINK_SPEED_40GB:
		return SPEED_40000;
	case I40E_LINK_SPEED_UNKNOWN:
		return SPEED_UNKNOWN;
	}
	return SPEED_UNKNOWN;
}

/**
 * i40e_set_vf_link_state
 * @vf: pointer to the VF structure
 * @pfe: pointer to PF event structure
 * @ls: pointer to link status structure
 *
 * set a link state on a single vf
 **/
static void i40e_set_vf_link_state(struct i40e_vf *vf,
				   struct virtchnl_pf_event *pfe, struct i40e_link_status *ls)
{
	u8 link_status = ls->link_info & I40E_AQ_LINK_UP;

	if (vf->link_forced)
		link_status = vf->link_up;

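	/* VFs that negotiated VIRTCHNL_VF_CAP_ADV_LINK_SPEED take the link
	 * speed as a plain Mbps value; legacy VFs take the virtchnl
	 * link-speed enum instead.
	 */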
	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_speed = link_status ?
			i40e_vc_link_speed2mbps(ls->link_speed) : 0;
		pfe->event_data.link_event_adv.link_status = link_status;
	} else {
		pfe->event_data.link_event.link_speed = link_status ?
			i40e_virtchnl_link_speed(ls->link_speed) : 0;
		pfe->event_data.link_event.link_status = link_status;
	}
}

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	i40e_set_vf_link_state(vf, &pfe, ls);

	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

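	/* CERTAIN_DOOM tells the VF drivers the reset will happen regardless;
	 * they are expected to stop using their resources immediately.
	 */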
	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}

/***********************misc routines*****************************/

/**
 * i40e_vc_reset_vf
 * @vf: pointer to the VF info
 * @notify_vf: notify vf about reset or not
 * Reset VF handler.
 **/
static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	if (notify_vf)
		i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset occurs initiated after this
	 * function was called. However, we do not want to wait forever, so
	 * we'll give a reasonable time and print a message if we failed to
	 * ensure a reset.
	 */
	for (i = 0; i < 20; i++) {
		/* If PF is in VFs releasing state reset VF is impossible,
		 * so leave it.
		 */
		if (test_bit(__I40E_VFS_RELEASING, pf->state))
			return;
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

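	/* 20 attempts at 10-20 ms each bounds the wait to roughly
	 * 200-400 ms; the messages below quote the lower bound.
	 */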
	if (notify_vf)
		dev_warn(&vf->pf->pdev->dev,
			 "Failed to initiate reset for VF %d after 200 milliseconds\n",
			 vf->vf_id);
	else
		dev_dbg(&vf->pf->pdev->dev,
			"Failed to initiate reset for VF %d after 200 milliseconds\n",
			vf->vf_id);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u16 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
	struct i40e_pf *pf = vf->pf;

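	/* vector_id is VF-relative; vector 0 (the misc/mailbox vector) is
	 * valid too, so anything below num_msix_vectors_vf passes.
	 */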
	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u16 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}

/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although the VF considers all the queues (1 to 16) as its
		 * own, they may actually belong to different VSIs (up to 4).
		 * We need to find which queues belong to which VSI.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find right queue id which is relative to a
			 * given VSI.
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));
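	/* The LNKLSTN index above works because the hardware allocates
	 * (num_msix_vectors_vf - 1) consecutive entries per VF, one for
	 * each vector except vector 0, which has its own LNKLST0 register.
	 */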
	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				     vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
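	/* linklistmap interleaves the two supported queue types: bit 2 * q
	 * is queue q's RX entry and bit 2 * q + 1 its TX entry, so a bit
	 * position decodes into (queue, type) by div/mod below.
	 */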
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* if the vf is running in polling mode and using interrupt zero,
	 * need to disable auto-mask on enabling zero interrupt for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}

/**
 * i40e_release_rdma_qvlist
 * @vf: pointer to the VF.
 *
 **/
static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_rdma_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_rdma_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}

/**
 * i40e_config_rdma_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int
i40e_config_rdma_qvlist(struct i40e_vf *vf,
			struct virtchnl_rdma_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_rdma_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	size_t size;
	u32 msix_vf;
	int ret = 0;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	if (qvlist_info->num_vectors > msix_vf) {
		dev_warn(&pf->pdev->dev,
			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
			 qvlist_info->num_vectors,
			 msix_vf);
		ret = -EINVAL;
		goto err_out;
	}

	kfree(vf->qvlist_info);
	size = virtchnl_struct_size(vf->qvlist_info, qv_info,
				    qvlist_info->num_vectors);
	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info) {
		ret = -ENOMEM;
		goto err_out;
	}
	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
			ret = -EINVAL;
			goto err_free;
		}

		v_idx = qv_info->v_idx;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
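		/* Each VF owns (msix_vf - 1) LNKLSTN entries; entry 0 of the
		 * block corresponds to vector 1, hence the (v_idx - 1) term.
		 */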
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err_free:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
err_out:
	return ret;
}

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
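	/* the HMC context stores the ring base address in 128-byte units */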
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	int ret = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
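	/* hbuff and dbuff are programmed in the granularity implied by the
	 * context shifts above (64-byte and 128-byte units respectively on
	 * this hardware).
	 */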
	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* if port VLAN is configured increase the max packet size */
	if (vsi->info.pvid)
		rx_ctx.rxmax += VLAN_HLEN;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u64 max_tx_rate = 0;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
			     vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed. Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program mac filter only for VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* storing VSI index and id for ADq and don't apply the mac filter */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		max_tx_rate = vf->tx_rate;
	} else if (vf->ch[idx].max_tx_rate) {
		max_tx_rate = vf->ch[idx].max_tx_rate;
	}

	if (max_tx_rate) {
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id =  vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

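		/* Each VSILAN_QTABLE register carries two PF queue indexes,
		 * in its low and high halves; 0x07FF (the end-of-list
		 * marker) in both halves terminates the table.
		 */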
		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}

/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part VPLAN_QTABLE & completes VF mappings.
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qid;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id =  vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
	}

	/* do the accounting and remove additional ADq VSI's */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released so don't
			 * release it again and only clear their values in
			 * structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs. If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * HW.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
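/* 0xAA selects the PCIe Device Status register in the VF's config space;
 * bit 5 (0x20) is its Transactions Pending flag, polled via the indirect
 * CIAA/CIAD access registers below.
 */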
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

/**
 * __i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * called to get the number of VLANs offloaded on this VF
 **/
static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	u16 num_vlans = 0, bkt;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
			num_vlans++;
	}

	return num_vlans;
}

/**
 * i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
 **/
static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	int num_vlans;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	return num_vlans;
}

/**
 * i40e_get_vlan_list_sync
 * @vsi: pointer to the VSI
 * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
 * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
 *             This array is allocated here, but has to be freed in caller.
 *
 * Called to get number of VLANs and VLAN list present in mac_filter_hash.
 **/
static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
				    s16 **vlan_list)
{
	struct i40e_mac_filter *f;
	int i = 0;
	int bkt;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	*num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
	*vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
	if (!(*vlan_list))
		goto err;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
			continue;
		(*vlan_list)[i++] = f->vlan;
	}
err:
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
}

/**
 * i40e_set_vsi_promisc
 * @vf: pointer to the VF struct
 * @seid: VSI number
 * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
 *                for a given VLAN
 * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
 *                  for a given VLAN
 * @vl: List of VLANs - apply filter for given VLANs
 * @num_vlans: Number of elements in @vl
 **/
static int
i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
		     bool unicast_enable, s16 *vl, u16 num_vlans)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int aq_ret, aq_tmp = 0;
	int i;

	/* No VLAN to set promisc on, set on VSI */
	if (!num_vlans || !vl) {
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
							       multi_enable,
							       NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
				vf->vf_id,
				ERR_PTR(aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			return aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
							     unicast_enable,
							     NULL, true);

		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
				vf->vf_id,
				ERR_PTR(aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
		}

		return aq_ret;
	}

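	/* A VLAN list was supplied: program promiscuous state per VLAN.
	 * Remember the first failure in aq_tmp but keep going so the
	 * remaining VLANs are still configured.
	 */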
	for (i = 0; i < num_vlans; i++) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
							    multi_enable,
							    vl[i], NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
				vf->vf_id,
				ERR_PTR(aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			if (!aq_tmp)
				aq_tmp = aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
							    unicast_enable,
							    vl[i], NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
				vf->vf_id,
				ERR_PTR(aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			if (!aq_tmp)
				aq_tmp = aq_ret;
		}
	}

	if (aq_tmp)
		aq_ret = aq_tmp;

	return aq_ret;
}

/**
 * i40e_config_vf_promiscuous_mode
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
 *
 * Called from the VF to configure the promiscuous mode of
 * VF vsis and from the VF reset path to reset promiscuous mode.
 **/
static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
					   u16 vsi_id,
					   bool allmulti,
					   bool alluni)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	int aq_ret = 0;
	u16 num_vlans;
	s16 *vl;

	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
		return -EINVAL;

	if (vf->port_vlan_id) {
		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
					      alluni, &vf->port_vlan_id, 1);
		return aq_ret;
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);

		if (!vl)
			return -ENOMEM;

		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
					      vl, num_vlans);
		kfree(vl);
		return aq_ret;
	}

	/* no VLANs to set on, set on VSI */
	aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
				      NULL, 0);
	return aq_ret;
}

/**
 * i40e_sync_vfr_reset
 * @hw: pointer to hw struct
 * @vf_id: VF identifier
 *
 * Before triggering a hardware reset, we need to know that no other process
 * has reserved the hardware for any reset operations. This check is done by
 * examining the status of the RSTAT1 register used to signal the reset.
 **/
static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
{
	u32 reg;
	int i;

	for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
		reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
			   I40E_VFINT_ICR0_ADMINQ_MASK;
		if (reg)
			return 0;

		usleep_range(100, 200);
	}

	return -EAGAIN;
}

/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;
	bool vf_active;
	u32 radq;

	/* warn the VF */
	vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give some time to finish to any VF config
	 * functions that may still be running at this point.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* Sync VFR reset before trigger next one */
		radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
			    I40E_VFINT_ICR0_ADMINQ_MASK;
		if (vf_active && !radq)
			/* waiting for finish reset by virtual driver */
			if (i40e_sync_vfr_reset(hw, vf->vf_id))
				dev_info(&pf->pdev->dev,
					 "Reset VF %d never finished\n",
					 vf->vf_id);

		/* Reset VF using VPGEN_VFRTRIG reg. It is also setting
		 * in progress state in rstat1 register.
		 */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
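	/* GLGEN_VFLRSTAT is a bit array indexed by absolute VF id,
	 * 32 VFs per register, hence the div/mod by 32 above.
	 */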
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}

/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* disable promisc modes in case they were enabled */
	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is in reset, resets successfully, or resets
 * are disabled and false otherwise.
 **/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
		return true;

	/* Bail out if VFs are disabled. */
	if (test_bit(__I40E_VF_DISABLE, pf->state))
		return true;

	/* If VF is being reset already we don't need to continue. */
	if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
		return true;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	usleep_range(20000, 40000);
	clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);

	return true;
}

/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		vf = &pf->vf[v];
		/* If VF is being reset no need to trigger reset again */
		if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
			i40e_trigger_vf_reset(&pf->vf[v], flr);
	}

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF to fail
		 * the previous check.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
				reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
				if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
					break;
			}

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		/* If VF is reset in another thread just continue */
		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		/* If VF is reset in another thread just continue */
		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Hw may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* If VF is reset in another thread just continue */
		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
			continue;

		i40e_cleanup_reset_vf(&pf->vf[v]);
	}

	i40e_flush(hw);
	usleep_range(20000, 40000);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;

	set_bit(__I40E_VFS_RELEASING, pf->state);
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
	clear_bit(__I40E_VFS_RELEASING, pf->state);
}

#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);

	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}
#endif

/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
		}
		ret = i40e_pci_sriov_enable(pdev, num_vfs);
		goto sriov_configure_out;
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		ret = -EINVAL;
		goto sriov_configure_out;
	}
sriov_configure_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}

/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	int aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   int retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

/**
 * i40e_sync_vf_state
 * @vf: pointer to the VF info
 * @state: VF state
 *
 * Called from a VF message to synchronize the service with a potential
 * VF reset state
 **/
static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
{
	int i;

	/* When handling some messages, it needs VF state to be set.
	 * It is possible that this flag is cleared during VF reset,
	 * so there is a need to wait until the end of the reset to
	 * handle the request message correctly.
	 */
	for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
		if (test_bit(state, &vf->vf_states))
			return true;
		usleep_range(10000, 20000);
	}

	return test_bit(state, &vf->vf_states);
}

/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      0, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}

/**
 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
 * @vf: pointer to VF structure
 **/
static void i40e_del_qch(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	/* first element in the array belongs to primary VF VSI and we shouldn't
	 * delete it. We should however delete the rest of the VSIs created
	 */
	for (i = 1; i < vf->num_tc; i++) {
		if (vf->ch[i].vsi_idx) {
			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
			vf->ch[i].vsi_idx = 0;
			vf->ch[i].vsi_id = 0;
		}
	}
}

/**
 * i40e_vc_get_max_frame_size
 * @vf: pointer to the VF
 *
 * Max frame size is determined based on the current port's max frame size and
 * whether a port VLAN is configured on this VF. The VF is not aware whether
 * it's in a port VLAN so the PF needs to account for this in max frame size
 * checks and sending the max frame size to the VF.
 **/
static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
{
	u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;

	if (vf->port_vlan_id)
		max_frame_size -= VLAN_HLEN;

	return max_frame_size;
}

/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	int aq_ret = 0;
	size_t len = 0;
	int ret;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
		aq_ret = -EINVAL;
		goto err;
	}

	len = virtchnl_struct_size(vfres, vsi_res, num_vsis);
	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = -ENOMEM;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

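	/* From here on, advertise only capabilities both ends can use:
	 * start from the VF-requested driver_caps and fold in the PF/HW
	 * feature checks below.
	 */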
	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (i40e_vf_client_capable(pf, vf->vf_id) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RDMA)) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RDMA;
		set_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
	} else {
		clear_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			dev_err(&pf->pdev->dev,
				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
				 vf->vf_id);
			aq_ret = -EINVAL;
			goto err;
		}
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	}

	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vfres->vf_cap_flags |=
					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
	vfres->max_mtu = i40e_vc_get_max_frame_size(vf);

	if (vf->lan_vsi_idx) {
		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
		/* VFs only use TC 0 */
		vfres->vsi_res[0].qset_handle
					  = le16_to_cpu(vsi->info.qs_handle[0]);
		if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
			i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
			eth_zero_addr(vf->default_lan_addr.addr);
		}
		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
				vf->default_lan_addr.addr);
	}
	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the VF */
	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}

/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the promiscuous mode of
 * VF vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_promisc_info *info =
	    (struct virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	bool allmulti = false;
	bool alluni = false;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err_out;
	}
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);
		/* Lie to the VF on purpose, because this is an error we can
		 * ignore. Unprivileged VF is not a virtual channel error.
		 */
		aq_ret = 0;
		goto err_out;
	}

	if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
		aq_ret = -EINVAL;
		goto err_out;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		aq_ret = -EINVAL;
		goto err_out;
	}

	/* Multicast promiscuous handling */
	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;

	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		alluni = true;
	aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
						 alluni);
	if (aq_ret)
		goto err_out;

	if (allmulti) {
		if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
				      &vf->vf_states))
			dev_info(&pf->pdev->dev,
				 "VF %d successfully set multicast promiscuous mode\n",
				 vf->vf_id);
	} else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
				      &vf->vf_states)) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully unset multicast promiscuous mode\n",
			 vf->vf_id);
	}

	if (alluni) {
		if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
				      &vf->vf_states))
			dev_info(&pf->pdev->dev,
				 "VF %d successfully set unicast promiscuous mode\n",
				 vf->vf_id);
	} else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
				      &vf->vf_states)) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully unset unicast promiscuous mode\n",
			 vf->vf_id);
	}
err_out:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}

/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the rx/tx
 * queues
 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vsi_queue_config_info *qci =
	    (struct virtchnl_vsi_queue_config_info *)msg;
	struct virtchnl_queue_pair_info *qpi;
	u16 vsi_id, vsi_queue_id = 0;
	struct i40e_pf *pf = vf->pf;
	int i, j = 0, idx = 0;
	struct i40e_vsi *vsi;
	u16 num_qps_all = 0;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (vf->adq_enabled) {
		for (i = 0; i < vf->num_tc; i++)
			num_qps_all += vf->ch[i].num_qps;
		if (num_qps_all != qci->num_queue_pairs) {
			aq_ret = -EINVAL;
			goto error_param;
		}
	}

	vsi_id = qci->vsi_id;

	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];

		if (!vf->adq_enabled) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      qpi->txq.queue_id)) {
				aq_ret = -EINVAL;
				goto error_param;
			}

			vsi_queue_id = qpi->txq.queue_id;

			if (qpi->txq.vsi_id != qci->vsi_id ||
			    qpi->rxq.vsi_id != qci->vsi_id ||
			    qpi->rxq.queue_id != vsi_queue_id) {
				aq_ret = -EINVAL;
				goto error_param;
			}
		}

		if (vf->adq_enabled) {
			if (idx >= ARRAY_SIZE(vf->ch)) {
				aq_ret = -ENODEV;
				goto error_param;
			}
			vsi_id = vf->ch[idx].vsi_id;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = -EINVAL;
			goto error_param;
		}

		/* For ADq there can be up to 4 VSIs with max 4 queues each.
		 * VF does not know about these additional VSIs and all
		 * it cares is about its own queues. PF configures these queues
		 * to its appropriate VSIs based on TC mapping
		 */
		if (vf->adq_enabled) {
			if (idx >= ARRAY_SIZE(vf->ch)) {
				aq_ret = -ENODEV;
				goto error_param;
			}
			if (j == (vf->ch[idx].num_qps - 1)) {
				idx++;
				j = 0; /* resetting the queue count */
				vsi_queue_id = 0;
			} else {
				j++;
				vsi_queue_id++;
			}
		}
	}
	/* set vsi num_queue_pairs in use to num configured by VF */
	if (!vf->adq_enabled) {
		pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
			qci->num_queue_pairs;
	} else {
		for (i = 0; i < vf->num_tc; i++) {
			vsi = pf->vsi[vf->ch[i].vsi_idx];
			vsi->num_queue_pairs = vf->ch[i].num_qps;

			if (i40e_update_adq_vsi_queues(vsi, i)) {
				aq_ret = -EIO;
				goto error_param;
			}
		}
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}

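/* Illustrative walk-through of the ADq queue-pair distribution above (a
 * sketch with hypothetical channel sizes): assume two TCs with
 * vf->ch[0].num_qps = 2 and vf->ch[1].num_qps = 2. Queue pairs 0 and 1 land
 * on the ch[0] VSI as its local queues 0 and 1; once j reaches
 * num_qps - 1, idx advances and vsi_queue_id resets, so queue pairs 2 and 3
 * land on the ch[1] VSI, again as local queues 0 and 1. The VF keeps
 * numbering its queues 0..3 and never sees the split across VSIs.
 */
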
/**
 * i40e_validate_queue_map - check queue map is valid
 * @vf: the VF structure pointer
 * @vsi_id: vsi id
 * @queuemap: Tx or Rx queue map
 *
 * check if Tx or Rx queue map is valid
 **/
static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
				   unsigned long queuemap)
{
	u16 vsi_queue_id, queue_id;

	for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
		if (vf->adq_enabled) {
			vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
			queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
		} else {
			queue_id = vsi_queue_id;
		}

		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
			return -EINVAL;
	}

	return 0;
}

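/* Worked example for the translation above (hypothetical values, assuming
 * the driver defaults I40E_MAX_VF_VSI = 4 and I40E_DEFAULT_QUEUES_PER_VF = 4):
 * with ADq enabled, vsi_queue_id 7 maps to channel 7 / 4 = 1 and local
 * queue 7 % 4 = 3, i.e. queue 3 of vf->ch[1]; without ADq the id is used
 * unchanged against the LAN VSI.
 */
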
/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the irq to
 * queue map
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_irq_map_info *irqmap_info =
	    (struct virtchnl_irq_map_info *)msg;
	struct virtchnl_vector_map *map;
	int aq_ret = 0;
	u16 vsi_id;
	int i;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (irqmap_info->num_vectors >
	    vf->pf->hw.func_caps.num_msix_vectors_vf) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
			aq_ret = -EINVAL;
			goto error_param;
		}
		vsi_id = map->vsi_id;

		if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
			aq_ret = -EINVAL;
			goto error_param;
		}

		if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
			aq_ret = -EINVAL;
			goto error_param;
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}

/**
 * i40e_ctrl_vf_tx_rings
 * @vsi: the SRIOV VSI being configured
 * @q_map: bit map of the queues to be enabled
 * @enable: start or stop the queue
 **/
static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
				 bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int ret = 0;
	u16 q_id;

	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     vsi->base_queue + q_id,
					     false /*is xdp*/, enable);
		if (ret)
			break;
	}
	return ret;
}

/**
 * i40e_ctrl_vf_rx_rings
 * @vsi: the SRIOV VSI being configured
 * @q_map: bit map of the queues to be enabled
 * @enable: start or stop the queue
 **/
static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
				 bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int ret = 0;
	u16 q_id;

	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
		ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
					     enable);
		if (ret)
			break;
	}
	return ret;
}

/**
 * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
 *
 * Returns true if validation was successful, else false.
 **/
static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
{
	if ((!vqs->rx_queues && !vqs->tx_queues) ||
	    vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
	    vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
		return false;

	return true;
}

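/* Example of the bitmap check above (a sketch, assuming
 * I40E_MAX_VF_QUEUES = 16): rx_queues = 0x0003 selects queues 0-1 and
 * passes; rx_queues = tx_queues = 0 fails because nothing is selected; and
 * tx_queues = BIT(16) = 0x10000 fails because only bits 0-15 may be set.
 */
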
/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to enable all or specific queue(s)
 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	int aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	/* Use the queue bit map sent by the VF */
	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
				  true)) {
		aq_ret = -EIO;
		goto error_param;
	}
	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
				  true)) {
		aq_ret = -EIO;
		goto error_param;
	}

	/* need to start the rings for additional ADq VSI's as well */
	if (vf->adq_enabled) {
		/* zero belongs to LAN VSI */
		for (i = 1; i < vf->num_tc; i++) {
			if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
				aq_ret = -EIO;
		}
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to disable all or specific
 * queue(s)
 **/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	/* Use the queue bit map sent by the VF */
	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
				  false)) {
		aq_ret = -EIO;
		goto error_param;
	}
	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
				  false)) {
		aq_ret = -EIO;
		goto error_param;
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}

/**
 * i40e_check_enough_queue - find big enough queue number
 * @vf: pointer to the VF info
 * @needed: the number of items needed
 *
 * Returns the base item index of the queue, or negative for error
 **/
static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
{
	unsigned int i, cur_queues, more, pool_size;
	struct i40e_lump_tracking *pile;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;

	vsi = pf->vsi[vf->lan_vsi_idx];
	cur_queues = vsi->alloc_queue_pairs;

	/* if current allocated queues are enough for need */
	if (cur_queues >= needed)
		return vsi->base_queue;

	pile = pf->qp_pile;
	if (cur_queues > 0) {
		/* if the allocated queues are not zero
		 * just check if there are enough queues for more
		 * behind the allocated queues.
		 */
		more = needed - cur_queues;
		for (i = vsi->base_queue + cur_queues;
		     i < pile->num_entries; i++) {
			if (pile->list[i] & I40E_PILE_VALID_BIT)
				break;

			if (more-- == 1)
				/* there is enough */
				return vsi->base_queue;
		}
	}

	pool_size = 0;
	for (i = 0; i < pile->num_entries; i++) {
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			pool_size = 0;
			continue;
		}
		if (needed <= ++pool_size)
			/* there is enough */
			return i - pool_size + 1;
	}

	return -ENOMEM;
}

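/* Worked example for the search above (hypothetical numbers): if the VF
 * currently owns cur_queues = 4 at base_queue = 12 and needs 8, the first
 * loop checks pile entries 16..19 for the extra 4 queues right behind the
 * current block; if any of them carries I40E_PILE_VALID_BIT, the fallback
 * scan instead looks for any fresh run of 8 consecutive free entries and
 * returns the start index of that run.
 */
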
/**
 * i40e_vc_request_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * VFs get a default number of queues but can use this message to request a
 * different number. If the request is successful, PF will reset the VF and
 * return 0. If unsuccessful, PF will send message informing VF of number of
 * available queues and return result of sending VF a message.
 **/
static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_res_request *vfres =
		(struct virtchnl_vf_res_request *)msg;
	u16 req_pairs = vfres->num_queue_pairs;
	u8 cur_pairs = vf->num_queue_pairs;
	struct i40e_pf *pf = vf->pf;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
		return -EINVAL;

	if (req_pairs > I40E_MAX_VF_QUEUES) {
		dev_err(&pf->pdev->dev,
			"VF %d tried to request more than %d queues.\n",
			vf->vf_id,
			I40E_MAX_VF_QUEUES);
		vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
	} else if (req_pairs - cur_pairs > pf->queues_left) {
		dev_warn(&pf->pdev->dev,
			 "VF %d requested %d more queues, but only %d left.\n",
			 vf->vf_id,
			 req_pairs - cur_pairs,
			 pf->queues_left);
		vfres->num_queue_pairs = pf->queues_left + cur_pairs;
	} else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
		dev_warn(&pf->pdev->dev,
			 "VF %d requested %d more queues, but there is not enough for it.\n",
			 vf->vf_id,
			 req_pairs - cur_pairs);
		vfres->num_queue_pairs = cur_pairs;
	} else {
		/* successful request */
		vf->num_req_queues = req_pairs;
		i40e_vc_reset_vf(vf, true);
		return 0;
	}

	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
				      (u8 *)vfres, sizeof(*vfres));
}

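/* Example exchange for the request path above (illustrative numbers): a VF
 * with cur_pairs = 4 asking for req_pairs = 8 when only pf->queues_left = 2
 * remain gets back num_queue_pairs = 6 and no reset; the same request with
 * enough spare queues stores num_req_queues = 8 and triggers a VF reset so
 * the new allocation takes effect.
 */
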
/**
 * i40e_vc_get_stats_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to get vsi stats
 **/
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_eth_stats stats;
	int aq_ret = 0;
	struct i40e_vsi *vsi;

	memset(&stats, 0, sizeof(struct i40e_eth_stats));

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		aq_ret = -EINVAL;
		goto error_param;
	}
	i40e_update_eth_stats(vsi);
	stats = vsi->eth_stats;

error_param:
	/* send the response back to the VF */
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
				      (u8 *)&stats, sizeof(stats));
}

#define I40E_MAX_MACVLAN_PER_HW 3072
#define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW /	\
	(num_ports))
/* If the VF is not trusted restrict the number of MAC/VLAN it can program
 * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
 */
#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
#define I40E_VC_MAX_VLAN_PER_VF 16

#define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports)		\
({	typeof(vf_num) vf_num_ = (vf_num);				\
	typeof(num_ports) num_ports_ = (num_ports);			\
	((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ *		\
	I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) +			\
	I40E_VC_MAX_MAC_ADDR_PER_VF; })
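
/* Worked example for the limits above (illustrative values): on a device
 * with 4 ports and 8 allocated VFs, I40E_MAX_MACVLAN_PER_PF(4) = 3072 / 4
 * = 768, so each trusted VF may use ((768 - 8 * 18) / 8) + 18 = 96 MAC/VLAN
 * filters, while an untrusted VF stays capped at
 * I40E_VC_MAX_MAC_ADDR_PER_VF = 18.
 */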

/**
 * i40e_check_vf_permission
 * @vf: pointer to the VF info
 * @al: MAC address list from virtchnl
 *
 * Check that the given list of MAC addresses is allowed. Will return -EPERM
 * if any address in the list is not valid. Checks the following conditions:
 *
 * 1) broadcast and zero addresses are never valid
 * 2) unicast addresses are not allowed if the VMM has administratively set
 *    the VF MAC address, unless the VF is marked as privileged.
 * 3) There is enough space to add all the addresses.
 *
 * Note that to guarantee consistency, it is expected this function be called
 * while holding the mac_filter_hash_lock, as otherwise the current number of
 * addresses might not be accurate.
 **/
static inline int i40e_check_vf_permission(struct i40e_vf *vf,
					   struct virtchnl_ether_addr_list *al)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
	struct i40e_hw *hw = &pf->hw;
	int mac2add_cnt = 0;
	int i;

	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;
		u8 *addr = al->list[i].addr;

		if (is_broadcast_ether_addr(addr) ||
		    is_zero_ether_addr(addr)) {
			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
				addr);
			return -EINVAL;
		}

		/* If the host VMM administrator has set the VF MAC address
		 * administratively via the ndo_set_vf_mac command then deny
		 * permission to the VF to add or delete unicast MAC addresses.
		 * Unless the VF is privileged and then it can do whatever.
		 * The VF may request to set the MAC address filter already
		 * assigned to it so do not return an error in that case.
		 */
		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
		    !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
		    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
			dev_err(&pf->pdev->dev,
				"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
			return -EPERM;
		}

		/* count filters that really will be added */
		f = i40e_find_mac(vsi, addr);
		if (!f)
			++mac2add_cnt;
	}

	/* If this VF is not privileged, then we can't add more than a limited
	 * number of addresses. Check to make sure that the additions do not
	 * push us over the limit.
	 */
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		if ((i40e_count_filters(vsi) + mac2add_cnt) >
		    I40E_VC_MAX_MAC_ADDR_PER_VF) {
			dev_err(&pf->pdev->dev,
				"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
			return -EPERM;
		}
	/* If this VF is trusted, it can use more resources than untrusted.
	 * However to ensure that every trusted VF has appropriate number of
	 * resources, divide whole pool of resources per port and then across
	 * all VFs.
	 */
	} else {
		if ((i40e_count_filters(vsi) + mac2add_cnt) >
		    I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
						       hw->num_ports)) {
			dev_err(&pf->pdev->dev,
				"Cannot add more MAC addresses, trusted VF exhausted its resources\n");
			return -EPERM;
		}
	}
	return 0;
}

/**
 * i40e_vc_ether_addr_type - get type of virtchnl_ether_addr
 * @vc_ether_addr: used to extract the type
 **/
static u8
i40e_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
{
	return vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK;
}

/**
 * i40e_is_vc_addr_legacy
 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
 *
 * check if the MAC address is from an older VF
 **/
static bool
i40e_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
{
	return i40e_vc_ether_addr_type(vc_ether_addr) ==
		VIRTCHNL_ETHER_ADDR_LEGACY;
}

/**
 * i40e_is_vc_addr_primary
 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
 *
 * check if the MAC address is the VF's primary MAC
 * This function should only be called when the MAC address in
 * virtchnl_ether_addr is a valid unicast MAC
 **/
static bool
i40e_is_vc_addr_primary(struct virtchnl_ether_addr *vc_ether_addr)
{
	return i40e_vc_ether_addr_type(vc_ether_addr) ==
		VIRTCHNL_ETHER_ADDR_PRIMARY;
}

/**
 * i40e_update_vf_mac_addr
 * @vf: VF to update
 * @vc_ether_addr: structure from VIRTCHNL with MAC to add
 *
 * update the VF's cached hardware MAC if allowed
 **/
static void
i40e_update_vf_mac_addr(struct i40e_vf *vf,
			struct virtchnl_ether_addr *vc_ether_addr)
{
	u8 *mac_addr = vc_ether_addr->addr;

	if (!is_valid_ether_addr(mac_addr))
		return;

	/* If request to add MAC filter is a primary request update its default
	 * MAC address with the requested one. If it is a legacy request then
	 * check if current default is empty if so update the default MAC
	 */
	if (i40e_is_vc_addr_primary(vc_ether_addr)) {
		ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
	} else if (i40e_is_vc_addr_legacy(vc_ether_addr)) {
		if (is_zero_ether_addr(vf->default_lan_addr.addr))
			ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
	}
}

/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * add guest mac address filter
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int ret = 0;
	int i;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
		ret = -EINVAL;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];

	/* Lock once, because all function inside for loop accesses VSI's
	 * MAC filter list which needs to be protected using same lock.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	ret = i40e_check_vf_permission(vf, al);
	if (ret) {
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		goto error_param;
	}

	/* add new addresses to the list */
	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;

		f = i40e_find_mac(vsi, al->list[i].addr);
		if (!f) {
			f = i40e_add_mac_filter(vsi, al->list[i].addr);

			if (!f) {
				dev_err(&pf->pdev->dev,
					"Unable to add MAC filter %pM for VF %d\n",
					al->list[i].addr, vf->vf_id);
				ret = -EINVAL;
				spin_unlock_bh(&vsi->mac_filter_hash_lock);
				goto error_param;
			}
		}
		i40e_update_vf_mac_addr(vf, &al->list[i]);
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program the updated filter list */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
			vf->vf_id, ret);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
				      ret, NULL, 0);
}

/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * remove guest mac address filter
 **/
static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	bool was_unimac_deleted = false;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int ret = 0;
	int i;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
		ret = -EINVAL;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		if (is_broadcast_ether_addr(al->list[i].addr) ||
		    is_zero_ether_addr(al->list[i].addr)) {
			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
				al->list[i].addr, vf->vf_id);
			ret = -EINVAL;
			goto error_param;
		}
		if (ether_addr_equal(al->list[i].addr,
				     vf->default_lan_addr.addr))
			was_unimac_deleted = true;
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	/* delete addresses from the list */
	for (i = 0; i < al->num_elements; i++)
		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
			ret = -EINVAL;
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_param;
		}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (was_unimac_deleted)
		eth_zero_addr(vf->default_lan_addr.addr);

	/* program the updated filter list */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
			vf->vf_id, ret);

	if (vf->trusted && was_unimac_deleted) {
		struct i40e_mac_filter *f;
		struct hlist_node *h;
		u8 *macaddr = NULL;
		int bkt;

		/* set last unicast mac address as default */
		spin_lock_bh(&vsi->mac_filter_hash_lock);
		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
			if (is_valid_ether_addr(f->macaddr))
				macaddr = f->macaddr;
		}
		if (macaddr)
			ether_addr_copy(vf->default_lan_addr.addr, macaddr);
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
}

/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * program guest vlan id
 **/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vlan_filter_list *vfl =
	    (struct virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int aq_ret = 0;
	int i;

	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
		goto error_param;
	}
	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = -EINVAL;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
		}
	}
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (vsi->info.pvid) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	i40e_vlan_stripping_enable(vsi);
	for (i = 0; i < vfl->num_elements; i++) {
		/* add new VLAN filter */
		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
		if (!ret)
			vf->num_vlan++;

		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
							   true,
							   vfl->vlan_id[i],
							   NULL);
		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
							   true,
							   vfl->vlan_id[i],
							   NULL);

		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to add VLAN filter %d for VF %d, error %d\n",
				vfl->vlan_id[i], vf->vf_id, ret);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
}

/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * remove programmed guest vlan id
 **/
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vlan_filter_list *vfl =
	    (struct virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int aq_ret = 0;
	int i;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = -EINVAL;
			goto error_param;
		}
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (vsi->info.pvid) {
		if (vfl->num_elements > 1 || vfl->vlan_id[0])
			aq_ret = -EINVAL;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
		vf->num_vlan--;

		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
							   false,
							   vfl->vlan_id[i],
							   NULL);
		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
							   false,
							   vfl->vlan_id[i],
							   NULL);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
}

/**
 * i40e_vc_rdma_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF for the iwarp msgs
 **/
static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf = vf->pf;
	int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
	int aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
				     msg, msglen);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_RDMA,
				       aq_ret);
}

/**
 * i40e_vc_rdma_qvmap_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @config: config qvmap or release it
 *
 * called from the VF for the iwarp msgs
 **/
static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
{
	struct virtchnl_rdma_qvlist_info *qvlist_info =
				(struct virtchnl_rdma_qvlist_info *)msg;
	int aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (config) {
		if (i40e_config_rdma_qvlist(vf, qvlist_info))
			aq_ret = -EINVAL;
	} else {
		i40e_release_rdma_qvlist(vf);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       config ? VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP :
				       VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP,
				       aq_ret);
}

/**
 * i40e_vc_config_rss_key
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Configure the VF's RSS key
 **/
static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_rss_key *vrk =
		(struct virtchnl_rss_key *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
	    !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
	    vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
		aq_ret = -EINVAL;
		goto err;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
err:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
				       aq_ret);
}

/**
 * i40e_vc_config_rss_lut
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Configure the VF's RSS LUT
 **/
static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_rss_lut *vrl =
		(struct virtchnl_rss_lut *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int aq_ret = 0;
	u16 i;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
	    !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
	    vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
		aq_ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < vrl->lut_entries; i++)
		if (vrl->lut[i] >= vf->num_queue_pairs) {
			aq_ret = -EINVAL;
			goto err;
		}

	vsi = pf->vsi[vf->lan_vsi_idx];
	aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
				       aq_ret);
}

/**
 * i40e_vc_get_rss_hena
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return the RSS HENA bits allowed by the hardware
 **/
static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_rss_hena *vrh = NULL;
	struct i40e_pf *pf = vf->pf;
	int aq_ret = 0;
	int len = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err;
	}
	len = sizeof(struct virtchnl_rss_hena);

	vrh = kzalloc(len, GFP_KERNEL);
	if (!vrh) {
		aq_ret = -ENOMEM;
		len = 0;
		goto err;
	}
	vrh->hena = i40e_pf_get_default_rss_hena(pf);
err:
	/* send the response back to the VF */
	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
					aq_ret, (u8 *)vrh, len);
	kfree(vrh);
	return aq_ret;
}

/**
 * i40e_vc_set_rss_hena
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Set the RSS HENA bits for the VF
 **/
static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_rss_hena *vrh =
		(struct virtchnl_rss_hena *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err;
	}
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
			  (u32)(vrh->hena >> 32));

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
}

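/* Example of the 64-bit HENA split above: for vrh->hena =
 * 0x0000000500000003ULL, the low half 0x00000003 is written to
 * I40E_VFQF_HENA1(0, vf_id) and the high half 0x00000005 to
 * I40E_VFQF_HENA1(1, vf_id), since each register holds 32 of the 64
 * hash-enable bits.
 */
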
/**
 * i40e_vc_enable_vlan_stripping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Enable vlan header stripping for the VF
 **/
static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
	struct i40e_vsi *vsi;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err;
	}

	vsi = vf->pf->vsi[vf->lan_vsi_idx];
	i40e_vlan_stripping_enable(vsi);

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
				       aq_ret);
}

/**
 * i40e_vc_disable_vlan_stripping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Disable vlan header stripping for the VF
 **/
static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
	struct i40e_vsi *vsi;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err;
	}

	vsi = vf->pf->vsi[vf->lan_vsi_idx];
	i40e_vlan_stripping_disable(vsi);

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
				       aq_ret);
}

/**
 * i40e_validate_cloud_filter
 * @vf: pointer to VF structure
 * @tc_filter: pointer to filter requested
 *
 * This function validates cloud filter programmed as TC filter for ADq
 **/
static int i40e_validate_cloud_filter(struct i40e_vf *vf,
				      struct virtchnl_filter *tc_filter)
{
	struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
	struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	if (!tc_filter->action) {
		dev_info(&pf->pdev->dev,
			 "VF %d: Currently ADq doesn't support Drop Action\n",
			 vf->vf_id);
		goto err;
	}

	/* action_meta is TC number here to which the filter is applied */
	if (!tc_filter->action_meta ||
	    tc_filter->action_meta > I40E_MAX_VF_VSI) {
		dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
			 vf->vf_id, tc_filter->action_meta);
		goto err;
	}

	/* Check filter if it's programmed for advanced mode or basic mode.
	 * There are two ADq modes (for VF only),
	 * 1. Basic mode: intended to allow as many filter options as possible
	 *		  to be added to a VF in Non-trusted mode. Main goal is
	 *		  to add filters to its own MAC and VLAN id.
	 * 2. Advanced mode: is for allowing filters to be applied other than
	 *		  its own MAC or VLAN. This mode requires the VF to be
	 *		  Trusted.
	 */
	if (mask.dst_mac[0] && !mask.dst_ip[0]) {
		vsi = pf->vsi[vf->lan_vsi_idx];
		f = i40e_find_mac(vsi, data.dst_mac);

		if (!f) {
			dev_info(&pf->pdev->dev,
				 "Destination MAC %pM doesn't belong to VF %d\n",
				 data.dst_mac, vf->vf_id);
			goto err;
		}

		if (mask.vlan_id) {
			hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
					   hlist) {
				if (f->vlan == ntohs(data.vlan_id)) {
					found = true;
					break;
				}
			}
			if (!found) {
				dev_info(&pf->pdev->dev,
					 "VF %d doesn't have any VLAN id %u\n",
					 vf->vf_id, ntohs(data.vlan_id));
				goto err;
			}
		}
	} else {
		/* Check if VF is trusted */
		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
			dev_err(&pf->pdev->dev,
				"VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
				vf->vf_id);
			return -EIO;
		}
	}

	if (mask.dst_mac[0] & data.dst_mac[0]) {
		if (is_broadcast_ether_addr(data.dst_mac) ||
		    is_zero_ether_addr(data.dst_mac)) {
			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
				 vf->vf_id, data.dst_mac);
			goto err;
		}
	}

	if (mask.src_mac[0] & data.src_mac[0]) {
		if (is_broadcast_ether_addr(data.src_mac) ||
		    is_zero_ether_addr(data.src_mac)) {
			dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
				 vf->vf_id, data.src_mac);
			goto err;
		}
	}

	if (mask.dst_port & data.dst_port) {
		if (!data.dst_port) {
			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
				 vf->vf_id);
			goto err;
		}
	}

	if (mask.src_port & data.src_port) {
		if (!data.src_port) {
			dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
				 vf->vf_id);
			goto err;
		}
	}

	if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
	    tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
		dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
			 vf->vf_id);
		goto err;
	}

	if (mask.vlan_id & data.vlan_id) {
		if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
			dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
				 vf->vf_id);
			goto err;
		}
	}

	return 0;
err:
	return -EIO;
}

/**
 * i40e_find_vsi_from_seid - searches for the vsi with the given seid
 * @vf: pointer to the VF info
 * @seid: seid of the vsi it is searching for
 **/
static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int i;

	for (i = 0; i < vf->num_tc; i++) {
		vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
		if (vsi && vsi->seid == seid)
			return vsi;
	}
	return NULL;
}

/**
 * i40e_del_all_cloud_filters
 * @vf: pointer to the VF info
 *
 * This function deletes all cloud filters
 **/
static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
{
	struct i40e_cloud_filter *cfilter = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	struct hlist_node *node;
	int ret;

	hlist_for_each_entry_safe(cfilter, node,
				  &vf->cloud_filter_list, cloud_node) {
		vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);

		if (!vsi) {
			dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
				vf->vf_id, cfilter->seid);
			continue;
		}

		if (cfilter->dst_port)
			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
								false);
		else
			ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
		if (ret)
			dev_err(&pf->pdev->dev,
				"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
				vf->vf_id, ERR_PTR(ret),
				i40e_aq_str(&pf->hw,
					    pf->hw.aq.asq_last_status));

		hlist_del(&cfilter->cloud_node);
		kfree(cfilter);
		vf->num_cloud_filters--;
	}
}

/**
 * i40e_vc_del_cloud_filter
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * This function deletes a cloud filter programmed as TC filter for ADq
 **/
static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
	struct i40e_cloud_filter cfilter, *cf = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	struct hlist_node *node;
	int aq_ret = 0;
	int i, ret;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err;
	}

	if (!vf->adq_enabled) {
		dev_info(&pf->pdev->dev,
			 "VF %d: ADq not enabled, can't apply cloud filter\n",
			 vf->vf_id);
		aq_ret = -EINVAL;
		goto err;
	}

	if (i40e_validate_cloud_filter(vf, vcf)) {
		dev_info(&pf->pdev->dev,
			 "VF %d: Invalid input, can't apply cloud filter\n",
			 vf->vf_id);
		aq_ret = -EINVAL;
		goto err;
	}

	memset(&cfilter, 0, sizeof(cfilter));
	/* parse destination mac address */
	for (i = 0; i < ETH_ALEN; i++)
		cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];

	/* parse source mac address */
	for (i = 0; i < ETH_ALEN; i++)
		cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];

	cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
	cfilter.dst_port = mask.dst_port & tcf.dst_port;
	cfilter.src_port = mask.src_port & tcf.src_port;

	switch (vcf->flow_type) {
	case VIRTCHNL_TCP_V4_FLOW:
		cfilter.n_proto = ETH_P_IP;
		if (mask.dst_ip[0] & tcf.dst_ip[0])
			memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
			       ARRAY_SIZE(tcf.dst_ip));
		else if (mask.src_ip[0] & tcf.dst_ip[0])
			memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
			       ARRAY_SIZE(tcf.dst_ip));
		break;
	case VIRTCHNL_TCP_V6_FLOW:
		cfilter.n_proto = ETH_P_IPV6;
		if (mask.dst_ip[3] & tcf.dst_ip[3])
			memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
			       sizeof(cfilter.ip.v6.dst_ip6));
		if (mask.src_ip[3] & tcf.src_ip[3])
			memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
			       sizeof(cfilter.ip.v6.src_ip6));
		break;
	default:
		/* TC filter can be configured based on different combinations
		 * and in this case IP is not a part of filter config
		 */
		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
			 vf->vf_id);
	}

	/* get the vsi to which the tc belongs to */
	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
	cfilter.seid = vsi->seid;
	cfilter.flags = vcf->field_flags;

	/* Deleting TC filter */
	if (tcf.dst_port)
		ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
	else
		ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
			vf->vf_id, ERR_PTR(ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto err;
	}

	hlist_for_each_entry_safe(cf, node,
				  &vf->cloud_filter_list, cloud_node) {
		if (cf->seid != cfilter.seid)
			continue;
		if (mask.dst_port)
			if (cfilter.dst_port != cf->dst_port)
				continue;
		if (mask.dst_mac[0])
			if (!ether_addr_equal(cf->src_mac, cfilter.src_mac))
				continue;
		/* for ipv4 data to be valid, only first byte of mask is set */
		if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
			if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
				   ARRAY_SIZE(tcf.dst_ip)))
				continue;
		/* for ipv6, mask is set for all sixteen bytes (4 words) */
		if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
			if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
				   sizeof(cfilter.ip.v6.src_ip6)))
				continue;
		if (mask.vlan_id)
			if (cfilter.vlan_id != cf->vlan_id)
				continue;

		hlist_del(&cf->cloud_node);
		kfree(cf);
		vf->num_cloud_filters--;
	}

err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
				       aq_ret);
}

/**
 * i40e_vc_add_cloud_filter
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * This function adds a cloud filter programmed as TC filter for ADq
 **/
static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
	struct i40e_cloud_filter *cfilter = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int aq_ret = 0;
	int i, ret;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err_out;
	}

	if (!vf->adq_enabled) {
		dev_info(&pf->pdev->dev,
			 "VF %d: ADq is not enabled, can't apply cloud filter\n",
			 vf->vf_id);
		aq_ret = -EINVAL;
		goto err_out;
	}

	if (i40e_validate_cloud_filter(vf, vcf)) {
		dev_info(&pf->pdev->dev,
			 "VF %d: Invalid input/s, can't apply cloud filter\n",
			 vf->vf_id);
		aq_ret = -EINVAL;
		goto err_out;
	}

	cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
	if (!cfilter) {
		aq_ret = -ENOMEM;
		goto err_out;
	}

	/* parse destination mac address */
	for (i = 0; i < ETH_ALEN; i++)
		cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];

	/* parse source mac address */
	for (i = 0; i < ETH_ALEN; i++)
		cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];

	cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
	cfilter->dst_port = mask.dst_port & tcf.dst_port;
	cfilter->src_port = mask.src_port & tcf.src_port;

	switch (vcf->flow_type) {
	case VIRTCHNL_TCP_V4_FLOW:
		cfilter->n_proto = ETH_P_IP;
		if (mask.dst_ip[0] & tcf.dst_ip[0])
			memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
			       ARRAY_SIZE(tcf.dst_ip));
		else if (mask.src_ip[0] & tcf.dst_ip[0])
			memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
			       ARRAY_SIZE(tcf.dst_ip));
		break;
	case VIRTCHNL_TCP_V6_FLOW:
		cfilter->n_proto = ETH_P_IPV6;
		if (mask.dst_ip[3] & tcf.dst_ip[3])
			memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
			       sizeof(cfilter->ip.v6.dst_ip6));
		if (mask.src_ip[3] & tcf.src_ip[3])
			memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
			       sizeof(cfilter->ip.v6.src_ip6));
		break;
	default:
		/* TC filter can be configured based on different combinations
		 * and in this case IP is not a part of filter config
		 */
		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
			 vf->vf_id);
	}

	/* get the VSI to which the TC belongs to */
	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
	cfilter->seid = vsi->seid;
	cfilter->flags = vcf->field_flags;

	/* Adding cloud filter programmed as TC filter */
	if (tcf.dst_port)
		ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
	else
		ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
			vf->vf_id, ERR_PTR(ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto err_free;
	}

	INIT_HLIST_NODE(&cfilter->cloud_node);
	hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
	/* release the pointer passing it to the collection */
	cfilter = NULL;
	vf->num_cloud_filters++;
err_free:
	kfree(cfilter);
err_out:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
				       aq_ret);
}

/**
 * i40e_vc_add_qch_msg: Add queue channel and enable ADq
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 **/
static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_tc_info *tci =
		(struct virtchnl_tc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int i, adq_request_qps = 0;
	int aq_ret = 0;
	u64 speed = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err;
	}

	/* ADq cannot be applied if spoof check is ON */
	if (vf->spoofchk) {
		dev_err(&pf->pdev->dev,
			"Spoof check is ON, turn it OFF to enable ADq\n");
		aq_ret = -EINVAL;
		goto err;
	}

	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
		dev_err(&pf->pdev->dev,
			"VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
			vf->vf_id);
		aq_ret = -EINVAL;
		goto err;
	}

	/* max number of traffic classes for VF currently capped at 4 */
	if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
		dev_err(&pf->pdev->dev,
			"VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
			vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
		aq_ret = -EINVAL;
		goto err;
	}

	/* validate queues for each TC */
	for (i = 0; i < tci->num_tc; i++)
		if (!tci->list[i].count ||
		    tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
			dev_err(&pf->pdev->dev,
				"VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
				vf->vf_id, i, tci->list[i].count,
				I40E_DEFAULT_QUEUES_PER_VF);
			aq_ret = -EINVAL;
			goto err;
		}

	/* need Max VF queues but already have default number of queues */
	adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;

	if (pf->queues_left < adq_request_qps) {
		dev_err(&pf->pdev->dev,
			"No queues left to allocate to VF %d\n",
			vf->vf_id);
		aq_ret = -EINVAL;
		goto err;
	} else {
		/* we need to allocate max VF queues to enable ADq so as to
		 * make sure ADq enabled VF always gets back queues when it
		 * goes through a reset.
		 */
		vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
	}

	/* get link speed in MB to validate rate limit */
	speed = i40e_vc_link_speed2mbps(ls->link_speed);
	if (speed == SPEED_UNKNOWN) {
		dev_err(&pf->pdev->dev,
			"Cannot detect link speed\n");
		aq_ret = -EINVAL;
		goto err;
	}

	/* parse data from the queue channel info */
	vf->num_tc = tci->num_tc;
	for (i = 0; i < vf->num_tc; i++) {
		if (tci->list[i].max_tx_rate) {
			if (tci->list[i].max_tx_rate > speed) {
				dev_err(&pf->pdev->dev,
					"Invalid max tx rate %llu specified for VF %d.",
					tci->list[i].max_tx_rate,
					vf->vf_id);
				aq_ret = -EINVAL;
				goto err;
			} else {
				vf->ch[i].max_tx_rate =
					tci->list[i].max_tx_rate;
			}
		}
		vf->ch[i].num_qps = tci->list[i].count;
	}

	/* set this flag only after making sure all inputs are sane */
	vf->adq_enabled = true;

	/* reset the VF in order to allocate resources */
	i40e_vc_reset_vf(vf, true);

	return 0;

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
				       aq_ret);
}

/**
 * i40e_vc_del_qch_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 **/
static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
{
	struct i40e_pf *pf = vf->pf;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err;
	}

	if (vf->adq_enabled) {
		i40e_del_all_cloud_filters(vf);
		i40e_del_qch(vf);
		vf->adq_enabled = false;
		vf->num_tc = 0;
		dev_info(&pf->pdev->dev,
			 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
			 vf->vf_id);
	} else {
		dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
			 vf->vf_id);
		aq_ret = -EINVAL;
	}

	/* reset the VF in order to allocate resources */
	i40e_vc_reset_vf(vf, true);

	return 0;

err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
				       aq_ret);
}

/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the PF structure
 * @vf_id: source VF id
 * @v_opcode: operation code
 * @v_retval: unused return value code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common aeq/arq handler to
 * process request from VF
 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
			   u32 __always_unused v_retval, u8 *msg, u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
	struct i40e_vf *vf;
	int ret;

	pf->vf_aq_requests++;
	if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
		return -EINVAL;
	vf = &(pf->vf[local_vf_id]);

	/* Check if VF is disabled. */
	if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
		return -EINVAL;

	/* perform basic checks on the msg */
	ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);

	if (ret) {
		i40e_vc_send_resp_to_vf(vf, v_opcode, -EINVAL);
		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
			local_vf_id, v_opcode, msglen);
		return ret;
	}

	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf, msg);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf, msg);
		i40e_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_RESET_VF:
		i40e_vc_reset_vf(vf, false);
		ret = 0;
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg);
		i40e_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		ret = i40e_vc_add_mac_addr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		ret = i40e_vc_del_mac_addr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg);
		break;
	case VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg);
		break;
	case VIRTCHNL_OP_RDMA:
		ret = i40e_vc_rdma_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
		ret = i40e_vc_rdma_qvmap_msg(vf, msg, true);
		break;
	case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
		ret = i40e_vc_rdma_qvmap_msg(vf, msg, false);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		ret = i40e_vc_config_rss_key(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		ret = i40e_vc_config_rss_lut(vf, msg);
		break;
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
		ret = i40e_vc_get_rss_hena(vf, msg);
		break;
	case VIRTCHNL_OP_SET_RSS_HENA:
		ret = i40e_vc_set_rss_hena(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		ret = i40e_vc_enable_vlan_stripping(vf, msg);
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		ret = i40e_vc_disable_vlan_stripping(vf, msg);
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES:
		ret = i40e_vc_request_queues_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_CHANNELS:
		ret = i40e_vc_add_qch_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DISABLE_CHANNELS:
		ret = i40e_vc_del_qch_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
		ret = i40e_vc_add_cloud_filter(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
		ret = i40e_vc_del_cloud_filter(vf, msg);
		break;
	case VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
			v_opcode, local_vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      -EOPNOTSUPP);
		break;
	}

	return ret;
}

/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the PF structure
 *
 * called from the VFLR irq handler to
 * free up VF resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;
	struct i40e_vf *vf;
	int vf_id;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		return 0;

	/* Re-enable the VFLR interrupt cause here, before looking for which
	 * VF got reset. Otherwise, if another VF gets a reset while the
	 * first one is being processed, that interrupt will be lost, and
	 * that VF will be stuck in reset forever.
	 */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
			i40e_reset_vf(vf, true);
	}

	return 0;
}

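/* Worked example for the VFLR index math above (hypothetical values): with
 * vf_base_id = 64 and vf_id = 5, the absolute id is 69, so reg_idx = 69 / 32
 * = 2 and bit_idx = 69 % 32 = 5; the reset indication for this VF is
 * therefore BIT(5) of I40E_GLGEN_VFLRSTAT(2).
 */
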
/**
 * i40e_validate_vf
 * @pf: the physical function
 * @vf_id: VF identifier
 *
 * Check that the VF is enabled and the VSI exists.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
{
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto err_out;
	}

	vf = &pf->vf[vf_id];
	vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
	if (!vsi)
		ret = -EINVAL;
err_out:
	return ret;
}

/**
 * i40e_check_vf_init_timeout
 * @vf: the virtual function
 *
 * Check that the VF's initialization was successfully done and if not
 * wait up to 300ms for its finish.
 *
 * Returns true when VF is initialized, false on timeout
 **/
static bool i40e_check_vf_init_timeout(struct i40e_vf *vf)
{
	int i;

	/* When the VF is resetting wait until it is done.
	 * It can take up to 200 milliseconds, but wait for
	 * up to 300 milliseconds to be safe.
	 */
	for (i = 0; i < 15; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
			return true;
		msleep(20);
	}

	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&vf->pf->pdev->dev,
			"VF %d still in reset. Try again.\n", vf->vf_id);
		return false;
	}

	return true;
}

/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: mac address
 *
 * program VF mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;
	struct hlist_node *h;
	int bkt;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_param;

	vf = &pf->vf[vf_id];
	if (!i40e_check_vf_init_timeout(vf)) {
		ret = -EAGAIN;
		goto error_param;
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	if (is_multicast_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	/* Lock once because below invoked function add/del_filter requires
	 * mac_filter_hash_lock to be held
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* delete the temporary mac address */
	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);

	/* Delete all the filters for this VSI - we're going to kill it
	 * anyway.
	 */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		__i40e_del_filter(vsi, f);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	ether_addr_copy(vf->default_lan_addr.addr, mac);

	if (is_zero_ether_addr(mac)) {
		vf->pf_set_mac = false;
		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
	} else {
		vf->pf_set_mac = true;
		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
			 mac, vf_id);
	}

	/* Force the VF interface down so it has to bring up with new MAC
	 * address
	 */
	i40e_vc_reset_vf(vf, true);
	dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");

error_param:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}

/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID to set
 * @qos: priority setting
 * @vlan_proto: vlan protocol
 *
 * program VF vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
			      u16 vlan_id, u8 qos, __be16 vlan_proto)
{
	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	bool allmulti = false, alluni = false;
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_pvid;

	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vlan_proto != htons(ETH_P_8021Q)) {
		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
		ret = -EPROTONOSUPPORT;
		goto error_pvid;
	}

	vf = &pf->vf[vf_id];
	if (!i40e_check_vf_init_timeout(vf)) {
		ret = -EAGAIN;
		goto error_pvid;
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
		/* duplicate request, so just return success */
		goto error_pvid;

	i40e_vlan_stripping_enable(vsi);
	i40e_vc_reset_vf(vf, true);
	/* During reset the VF got a new VSI, so refresh a pointer. */
	vsi = pf->vsi[vf->lan_vsi_idx];
	/* Locked once because multiple functions below iterate list */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* Check for condition where there was already a port VLAN ID
	 * filter set and now it is being deleted by setting it to zero.
	 * Additionally check for the condition where there was a port
	 * VLAN but now there is a new and different port VLAN being set.
	 * Before deleting all the old VLAN filters we must add new ones
	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
	 * MAC addresses deleted.
	 */
	if ((!(vlan_id || qos) ||
	     vlanprio != le16_to_cpu(vsi->info.pvid)) &&
	    vsi->info.pvid) {
		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}
	}

	if (vsi->info.pvid) {
		/* remove all filters on the old VLAN */
		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
					   VLAN_VID_MASK));
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* disable promisc modes in case they were enabled */
	ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
					      allmulti, alluni);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
		goto error_pvid;
	}

	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi, vlanprio);
	else
		i40e_vsi_remove_pvid(vsi);
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter for each MAC */
		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}

		/* remove the previously added non-VLAN MAC filters */
		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
		alluni = true;

	if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
		allmulti = true;

	/* Schedule the worker thread to take care of applying changes */
	i40e_service_event_schedule(vsi->back);

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}

	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);

	ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
		goto error_pvid;
	}

	ret = 0;

error_pvid:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}

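/* Example of the vlanprio encoding used above (illustrative, assuming the
 * usual 802.1Q layout where the 3 QoS/PCP bits sit above the 12-bit VLAN
 * ID): vlan_id = 100 (0x064) with qos = 3 yields
 * vlanprio = 0x064 | (3 << I40E_VLAN_PRIORITY_SHIFT), which the PVID check
 * compares against le16_to_cpu(vsi->info.pvid) to detect duplicate
 * requests.
 */
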
/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate
 * @max_tx_rate: Maximum Tx rate
 *
 * configure VF Tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
		       int max_tx_rate)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error;

	if (min_tx_rate) {
		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
			min_tx_rate, vf_id);
		ret = -EINVAL;
		goto error;
	}

	vf = &pf->vf[vf_id];
	if (!i40e_check_vf_init_timeout(vf)) {
		ret = -EAGAIN;
		goto error;
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
	if (ret)
		goto error;

	vf->tx_rate = max_tx_rate;
error:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
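
/* Usage sketch (illustrative): max_tx_rate is in Mbps, and min_tx_rate must
 * be 0 because the function rejects any nonzero minimum rate. From iproute2:
 *
 *	ip link set <pf-netdev> vf 0 max_tx_rate 1000
 */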
/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_param;

	vf = &pf->vf[vf_id];
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		ret = -ENOENT;
		goto error_param;
	}

	ivi->vf = vf_id;

	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);

	ivi->max_tx_rate = vf->tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	if (vf->link_forced == false)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up == true)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->spoofchk = vf->spoofchk;
	ivi->trusted = vf->trusted;
	ret = 0;

error_param:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
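
/* Usage sketch (illustrative): the fields filled in above are what
 * "ip link show <pf-netdev>" prints on each vf line (MAC, vlan/qos,
 * max_tx_rate, spoofchk, link-state, trust), fetched through the
 * .ndo_get_vf_config hook.
 */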
/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: required link state
 *
 * Set the link state of a specified VF, regardless of physical link state
 **/
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	struct virtchnl_pf_event pfe;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int abs_vf_id;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_out;
	}

	vf = &pf->vf[vf_id];
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		i40e_set_vf_link_state(vf, &pfe, ls);
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		i40e_set_vf_link_state(vf, &pfe, ls);
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		i40e_set_vf_link_state(vf, &pfe, ls);
		break;
	default:
		ret = -EINVAL;
		goto error_out;
	}
	/* Notify the VF of its new link state */
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);

error_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
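
/* Usage sketch (illustrative): the forced link state maps to iproute2's
 *
 *	ip link set <pf-netdev> vf 0 state auto|enable|disable
 *
 * "auto" tracks the physical link, while enable/disable force link_up
 * as in the switch above.
 */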
/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 **/
int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	vf = &(pf->vf[vf_id]);
	if (!i40e_check_vf_init_timeout(vf)) {
		ret = -EAGAIN;
		goto out;
	}

	if (enable == vf->spoofchk)
		goto out;

	vf->spoofchk = enable;
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (enable)
		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
			ret);
		ret = -EIO;
	}
out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
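
/* Usage sketch (illustrative): with spoof checking enabled the hardware
 * drops frames whose source MAC/VLAN does not match the VF's configured
 * filters (the MAC_CHK/VLAN_CHK security flags above). Toggled with:
 *
 *	ip link set <pf-netdev> vf 0 spoofchk on
 */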
/**
 * i40e_ndo_set_vf_trust
 * @netdev: network interface device structure of the pf
 * @vf_id: VF identifier
 * @setting: trust setting
 *
 * Enable or disable VF trust setting
 **/
int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
		ret = -EINVAL;
		goto out;
	}

	vf = &pf->vf[vf_id];

	if (setting == vf->trusted)
		goto out;

	vf->trusted = setting;

	/* request PF to sync mac/vlan filters for the VF */
	set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
	pf->vsi[vf->lan_vsi_idx]->flags |= I40E_VSI_FLAG_FILTER_CHANGED;

	i40e_vc_reset_vf(vf, true);
	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
		 vf_id, setting ? "" : "un");

	if (vf->adq_enabled) {
		if (!vf->trusted) {
			dev_info(&pf->pdev->dev,
				 "VF %u no longer Trusted, deleting all cloud filters\n",
				 vf_id);
			i40e_del_all_cloud_filters(vf);
		}
	}

out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
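
/* Usage sketch (illustrative): trust gates privileged VF requests such as
 * promiscuous mode and, per the ADq path above, retaining cloud filters:
 *
 *	ip link set <pf-netdev> vf 0 trust on
 */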
/**
 * i40e_get_vf_stats - populate some stats for the VF
 * @netdev: the netdev of the PF
 * @vf_id: the host OS identifier (0-127)
 * @vf_stats: pointer to the OS memory to be initialized
 **/
int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
		      struct ifla_vf_stats *vf_stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_eth_stats *stats;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;

	/* validate the request */
	if (i40e_validate_vf(pf, vf_id))
		return -EINVAL;

	vf = &pf->vf[vf_id];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
		return -EAGAIN;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi)
		return -EINVAL;

	i40e_update_eth_stats(vsi);
	stats = &vsi->eth_stats;

	memset(vf_stats, 0, sizeof(*vf_stats));

	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
		stats->rx_multicast;
	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
		stats->tx_multicast;
	vf_stats->rx_bytes   = stats->rx_bytes;
	vf_stats->tx_bytes   = stats->tx_bytes;
	vf_stats->broadcast  = stats->rx_broadcast;
	vf_stats->multicast  = stats->rx_multicast;
	vf_stats->rx_dropped = stats->rx_discards;
	vf_stats->tx_dropped = stats->tx_discards;

	return 0;
}
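
/* Usage sketch (illustrative): these counters back the per-VF statistics
 * reported through the .ndo_get_vf_stats hook and are visible from
 * userspace with e.g. "ip -s link show <pf-netdev>".
 */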