1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/etherdevice.h>
5 #include <linux/iopoll.h>
6 #include <net/rtnetlink.h>
7 #include "hclgevf_cmd.h"
8 #include "hclgevf_main.h"
9 #include "hclgevf_regs.h"
10 #include "hclge_mbx.h"
12 #include "hclgevf_devlink.h"
13 #include "hclge_comm_rss.h"
15 #define HCLGEVF_NAME "hclgevf"
17 #define HCLGEVF_RESET_MAX_FAIL_CNT 5
19 static int hclgevf_reset_hdev(struct hclgevf_dev
*hdev
);
20 static void hclgevf_task_schedule(struct hclgevf_dev
*hdev
,
23 static struct hnae3_ae_algo ae_algovf
;
25 static struct workqueue_struct
*hclgevf_wq
;
27 static const struct pci_device_id ae_algovf_pci_tbl
[] = {
28 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_VF
), 0},
29 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_RDMA_DCB_PFC_VF
),
30 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS
},
31 /* required last entry */
35 MODULE_DEVICE_TABLE(pci
, ae_algovf_pci_tbl
);
37 /* hclgevf_cmd_send - send command to command queue
38 * @hw: pointer to the hw struct
39 * @desc: prefilled descriptor for describing the command
40 * @num : the number of descriptors to be sent
42 * This is the main send command for command queue, it
43 * sends the queue, cleans the queue, etc
45 int hclgevf_cmd_send(struct hclgevf_hw
*hw
, struct hclge_desc
*desc
, int num
)
47 return hclge_comm_cmd_send(&hw
->hw
, desc
, num
);
50 void hclgevf_arq_init(struct hclgevf_dev
*hdev
)
52 struct hclge_comm_cmq
*cmdq
= &hdev
->hw
.hw
.cmq
;
54 spin_lock(&cmdq
->crq
.lock
);
55 /* initialize the pointers of async rx queue of mailbox */
56 hdev
->arq
.hdev
= hdev
;
59 atomic_set(&hdev
->arq
.count
, 0);
60 spin_unlock(&cmdq
->crq
.lock
);
63 struct hclgevf_dev
*hclgevf_ae_get_hdev(struct hnae3_handle
*handle
)
66 return container_of(handle
, struct hclgevf_dev
, nic
);
67 else if (handle
->client
->type
== HNAE3_CLIENT_ROCE
)
68 return container_of(handle
, struct hclgevf_dev
, roce
);
70 return container_of(handle
, struct hclgevf_dev
, nic
);
73 static void hclgevf_update_stats(struct hnae3_handle
*handle
)
75 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
78 status
= hclge_comm_tqps_update_stats(handle
, &hdev
->hw
.hw
);
80 dev_err(&hdev
->pdev
->dev
,
81 "VF update of TQPS stats fail, status = %d.\n",
85 static int hclgevf_get_sset_count(struct hnae3_handle
*handle
, int strset
)
87 if (strset
== ETH_SS_TEST
)
89 else if (strset
== ETH_SS_STATS
)
90 return hclge_comm_tqps_get_sset_count(handle
);
95 static void hclgevf_get_strings(struct hnae3_handle
*handle
, u32 strset
,
100 if (strset
== ETH_SS_STATS
)
101 p
= hclge_comm_tqps_get_strings(handle
, p
);
104 static void hclgevf_get_stats(struct hnae3_handle
*handle
, u64
*data
)
106 hclge_comm_tqps_get_stats(handle
, data
);
109 static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg
*msg
, u8 code
,
113 memset(msg
, 0, sizeof(struct hclge_vf_to_pf_msg
));
115 msg
->subcode
= subcode
;
119 static int hclgevf_get_basic_info(struct hclgevf_dev
*hdev
)
121 struct hnae3_ae_dev
*ae_dev
= hdev
->ae_dev
;
122 u8 resp_msg
[HCLGE_MBX_MAX_RESP_DATA_SIZE
];
123 struct hclge_basic_info
*basic_info
;
124 struct hclge_vf_to_pf_msg send_msg
;
128 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_GET_BASIC_INFO
, 0);
129 status
= hclgevf_send_mbx_msg(hdev
, &send_msg
, true, resp_msg
,
132 dev_err(&hdev
->pdev
->dev
,
133 "failed to get basic info from pf, ret = %d", status
);
137 basic_info
= (struct hclge_basic_info
*)resp_msg
;
139 hdev
->hw_tc_map
= basic_info
->hw_tc_map
;
140 hdev
->mbx_api_version
= le16_to_cpu(basic_info
->mbx_api_version
);
141 caps
= le32_to_cpu(basic_info
->pf_caps
);
142 if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B
, &caps
))
143 set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B
, ae_dev
->caps
);
148 static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev
*hdev
)
150 struct hnae3_handle
*nic
= &hdev
->nic
;
151 struct hclge_vf_to_pf_msg send_msg
;
155 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_SET_VLAN
,
156 HCLGE_MBX_GET_PORT_BASE_VLAN_STATE
);
157 ret
= hclgevf_send_mbx_msg(hdev
, &send_msg
, true, &resp_msg
,
160 dev_err(&hdev
->pdev
->dev
,
161 "VF request to get port based vlan state failed %d",
166 nic
->port_base_vlan_state
= resp_msg
;
171 static int hclgevf_get_queue_info(struct hclgevf_dev
*hdev
)
173 #define HCLGEVF_TQPS_RSS_INFO_LEN 6
175 struct hclge_mbx_vf_queue_info
*queue_info
;
176 u8 resp_msg
[HCLGEVF_TQPS_RSS_INFO_LEN
];
177 struct hclge_vf_to_pf_msg send_msg
;
180 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_GET_QINFO
, 0);
181 status
= hclgevf_send_mbx_msg(hdev
, &send_msg
, true, resp_msg
,
182 HCLGEVF_TQPS_RSS_INFO_LEN
);
184 dev_err(&hdev
->pdev
->dev
,
185 "VF request to get tqp info from PF failed %d",
190 queue_info
= (struct hclge_mbx_vf_queue_info
*)resp_msg
;
191 hdev
->num_tqps
= le16_to_cpu(queue_info
->num_tqps
);
192 hdev
->rss_size_max
= le16_to_cpu(queue_info
->rss_size
);
193 hdev
->rx_buf_len
= le16_to_cpu(queue_info
->rx_buf_len
);
198 static int hclgevf_get_queue_depth(struct hclgevf_dev
*hdev
)
200 #define HCLGEVF_TQPS_DEPTH_INFO_LEN 4
202 struct hclge_mbx_vf_queue_depth
*queue_depth
;
203 u8 resp_msg
[HCLGEVF_TQPS_DEPTH_INFO_LEN
];
204 struct hclge_vf_to_pf_msg send_msg
;
207 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_GET_QDEPTH
, 0);
208 ret
= hclgevf_send_mbx_msg(hdev
, &send_msg
, true, resp_msg
,
209 HCLGEVF_TQPS_DEPTH_INFO_LEN
);
211 dev_err(&hdev
->pdev
->dev
,
212 "VF request to get tqp depth info from PF failed %d",
217 queue_depth
= (struct hclge_mbx_vf_queue_depth
*)resp_msg
;
218 hdev
->num_tx_desc
= le16_to_cpu(queue_depth
->num_tx_desc
);
219 hdev
->num_rx_desc
= le16_to_cpu(queue_depth
->num_rx_desc
);
224 static u16
hclgevf_get_qid_global(struct hnae3_handle
*handle
, u16 queue_id
)
226 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
227 struct hclge_vf_to_pf_msg send_msg
;
232 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_GET_QID_IN_PF
, 0);
233 *(__le16
*)send_msg
.data
= cpu_to_le16(queue_id
);
234 ret
= hclgevf_send_mbx_msg(hdev
, &send_msg
, true, resp_data
,
237 qid_in_pf
= le16_to_cpu(*(__le16
*)resp_data
);
242 static int hclgevf_get_pf_media_type(struct hclgevf_dev
*hdev
)
244 struct hclge_vf_to_pf_msg send_msg
;
248 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_GET_MEDIA_TYPE
, 0);
249 ret
= hclgevf_send_mbx_msg(hdev
, &send_msg
, true, resp_msg
,
252 dev_err(&hdev
->pdev
->dev
,
253 "VF request to get the pf port media type failed %d",
258 hdev
->hw
.mac
.media_type
= resp_msg
[0];
259 hdev
->hw
.mac
.module_type
= resp_msg
[1];
264 static int hclgevf_alloc_tqps(struct hclgevf_dev
*hdev
)
266 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(hdev
->pdev
);
267 struct hclge_comm_tqp
*tqp
;
270 hdev
->htqp
= devm_kcalloc(&hdev
->pdev
->dev
, hdev
->num_tqps
,
271 sizeof(struct hclge_comm_tqp
), GFP_KERNEL
);
277 for (i
= 0; i
< hdev
->num_tqps
; i
++) {
278 tqp
->dev
= &hdev
->pdev
->dev
;
281 tqp
->q
.ae_algo
= &ae_algovf
;
282 tqp
->q
.buf_size
= hdev
->rx_buf_len
;
283 tqp
->q
.tx_desc_num
= hdev
->num_tx_desc
;
284 tqp
->q
.rx_desc_num
= hdev
->num_rx_desc
;
286 /* need an extended offset to configure queues >=
287 * HCLGEVF_TQP_MAX_SIZE_DEV_V2.
289 if (i
< HCLGEVF_TQP_MAX_SIZE_DEV_V2
)
290 tqp
->q
.io_base
= hdev
->hw
.hw
.io_base
+
291 HCLGEVF_TQP_REG_OFFSET
+
292 i
* HCLGEVF_TQP_REG_SIZE
;
294 tqp
->q
.io_base
= hdev
->hw
.hw
.io_base
+
295 HCLGEVF_TQP_REG_OFFSET
+
296 HCLGEVF_TQP_EXT_REG_OFFSET
+
297 (i
- HCLGEVF_TQP_MAX_SIZE_DEV_V2
) *
298 HCLGEVF_TQP_REG_SIZE
;
300 /* when device supports tx push and has device memory,
301 * the queue can execute push mode or doorbell mode on
304 if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B
, ae_dev
->caps
))
305 tqp
->q
.mem_base
= hdev
->hw
.hw
.mem_base
+
306 HCLGEVF_TQP_MEM_OFFSET(hdev
, i
);
314 static int hclgevf_knic_setup(struct hclgevf_dev
*hdev
)
316 struct hnae3_handle
*nic
= &hdev
->nic
;
317 struct hnae3_knic_private_info
*kinfo
;
318 u16 new_tqps
= hdev
->num_tqps
;
323 kinfo
->num_tx_desc
= hdev
->num_tx_desc
;
324 kinfo
->num_rx_desc
= hdev
->num_rx_desc
;
325 kinfo
->rx_buf_len
= hdev
->rx_buf_len
;
326 for (i
= 0; i
< HCLGE_COMM_MAX_TC_NUM
; i
++)
327 if (hdev
->hw_tc_map
& BIT(i
))
330 num_tc
= num_tc
? num_tc
: 1;
331 kinfo
->tc_info
.num_tc
= num_tc
;
332 kinfo
->rss_size
= min_t(u16
, hdev
->rss_size_max
, new_tqps
/ num_tc
);
333 new_tqps
= kinfo
->rss_size
* num_tc
;
334 kinfo
->num_tqps
= min(new_tqps
, hdev
->num_tqps
);
336 kinfo
->tqp
= devm_kcalloc(&hdev
->pdev
->dev
, kinfo
->num_tqps
,
337 sizeof(struct hnae3_queue
*), GFP_KERNEL
);
341 for (i
= 0; i
< kinfo
->num_tqps
; i
++) {
342 hdev
->htqp
[i
].q
.handle
= &hdev
->nic
;
343 hdev
->htqp
[i
].q
.tqp_index
= i
;
344 kinfo
->tqp
[i
] = &hdev
->htqp
[i
].q
;
347 /* after init the max rss_size and tqps, adjust the default tqp numbers
348 * and rss size with the actual vector numbers
350 kinfo
->num_tqps
= min_t(u16
, hdev
->num_nic_msix
- 1, kinfo
->num_tqps
);
351 kinfo
->rss_size
= min_t(u16
, kinfo
->num_tqps
/ num_tc
,
357 static void hclgevf_request_link_info(struct hclgevf_dev
*hdev
)
359 struct hclge_vf_to_pf_msg send_msg
;
362 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_GET_LINK_STATUS
, 0);
363 status
= hclgevf_send_mbx_msg(hdev
, &send_msg
, false, NULL
, 0);
365 dev_err(&hdev
->pdev
->dev
,
366 "VF failed to fetch link status(%d) from PF", status
);
369 void hclgevf_update_link_status(struct hclgevf_dev
*hdev
, int link_state
)
371 struct hnae3_handle
*rhandle
= &hdev
->roce
;
372 struct hnae3_handle
*handle
= &hdev
->nic
;
373 struct hnae3_client
*rclient
;
374 struct hnae3_client
*client
;
376 if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING
, &hdev
->state
))
379 client
= handle
->client
;
380 rclient
= hdev
->roce_client
;
383 test_bit(HCLGEVF_STATE_DOWN
, &hdev
->state
) ? 0 : link_state
;
384 if (link_state
!= hdev
->hw
.mac
.link
) {
385 hdev
->hw
.mac
.link
= link_state
;
386 client
->ops
->link_status_change(handle
, !!link_state
);
387 if (rclient
&& rclient
->ops
->link_status_change
)
388 rclient
->ops
->link_status_change(rhandle
, !!link_state
);
391 clear_bit(HCLGEVF_STATE_LINK_UPDATING
, &hdev
->state
);
394 static void hclgevf_update_link_mode(struct hclgevf_dev
*hdev
)
396 #define HCLGEVF_ADVERTISING 0
397 #define HCLGEVF_SUPPORTED 1
399 struct hclge_vf_to_pf_msg send_msg
;
401 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_GET_LINK_MODE
, 0);
402 send_msg
.data
[0] = HCLGEVF_ADVERTISING
;
403 hclgevf_send_mbx_msg(hdev
, &send_msg
, false, NULL
, 0);
404 send_msg
.data
[0] = HCLGEVF_SUPPORTED
;
405 hclgevf_send_mbx_msg(hdev
, &send_msg
, false, NULL
, 0);
408 static int hclgevf_set_handle_info(struct hclgevf_dev
*hdev
)
410 struct hnae3_handle
*nic
= &hdev
->nic
;
413 nic
->ae_algo
= &ae_algovf
;
414 nic
->pdev
= hdev
->pdev
;
415 nic
->numa_node_mask
= hdev
->numa_node_mask
;
416 nic
->flags
|= HNAE3_SUPPORT_VF
;
417 nic
->kinfo
.io_base
= hdev
->hw
.hw
.io_base
;
419 ret
= hclgevf_knic_setup(hdev
);
421 dev_err(&hdev
->pdev
->dev
, "VF knic setup failed %d\n",
426 static void hclgevf_free_vector(struct hclgevf_dev
*hdev
, int vector_id
)
428 if (hdev
->vector_status
[vector_id
] == HCLGEVF_INVALID_VPORT
) {
429 dev_warn(&hdev
->pdev
->dev
,
430 "vector(vector_id %d) has been freed.\n", vector_id
);
434 hdev
->vector_status
[vector_id
] = HCLGEVF_INVALID_VPORT
;
435 hdev
->num_msi_left
+= 1;
436 hdev
->num_msi_used
-= 1;
439 static int hclgevf_get_vector(struct hnae3_handle
*handle
, u16 vector_num
,
440 struct hnae3_vector_info
*vector_info
)
442 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
443 struct hnae3_vector_info
*vector
= vector_info
;
447 vector_num
= min_t(u16
, hdev
->num_nic_msix
- 1, vector_num
);
448 vector_num
= min(hdev
->num_msi_left
, vector_num
);
450 for (j
= 0; j
< vector_num
; j
++) {
451 for (i
= HCLGEVF_MISC_VECTOR_NUM
+ 1; i
< hdev
->num_msi
; i
++) {
452 if (hdev
->vector_status
[i
] == HCLGEVF_INVALID_VPORT
) {
453 vector
->vector
= pci_irq_vector(hdev
->pdev
, i
);
454 vector
->io_addr
= hdev
->hw
.hw
.io_base
+
455 HCLGEVF_VECTOR_REG_BASE
+
456 (i
- 1) * HCLGEVF_VECTOR_REG_OFFSET
;
457 hdev
->vector_status
[i
] = 0;
458 hdev
->vector_irq
[i
] = vector
->vector
;
467 hdev
->num_msi_left
-= alloc
;
468 hdev
->num_msi_used
+= alloc
;
473 static int hclgevf_get_vector_index(struct hclgevf_dev
*hdev
, int vector
)
477 for (i
= 0; i
< hdev
->num_msi
; i
++)
478 if (vector
== hdev
->vector_irq
[i
])
484 /* for revision 0x20, vf shared the same rss config with pf */
485 static int hclgevf_get_rss_hash_key(struct hclgevf_dev
*hdev
)
487 #define HCLGEVF_RSS_MBX_RESP_LEN 8
488 struct hclge_comm_rss_cfg
*rss_cfg
= &hdev
->rss_cfg
;
489 u8 resp_msg
[HCLGEVF_RSS_MBX_RESP_LEN
];
490 struct hclge_vf_to_pf_msg send_msg
;
491 u16 msg_num
, hash_key_index
;
495 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_GET_RSS_KEY
, 0);
496 msg_num
= (HCLGE_COMM_RSS_KEY_SIZE
+ HCLGEVF_RSS_MBX_RESP_LEN
- 1) /
497 HCLGEVF_RSS_MBX_RESP_LEN
;
498 for (index
= 0; index
< msg_num
; index
++) {
499 send_msg
.data
[0] = index
;
500 ret
= hclgevf_send_mbx_msg(hdev
, &send_msg
, true, resp_msg
,
501 HCLGEVF_RSS_MBX_RESP_LEN
);
503 dev_err(&hdev
->pdev
->dev
,
504 "VF get rss hash key from PF failed, ret=%d",
509 hash_key_index
= HCLGEVF_RSS_MBX_RESP_LEN
* index
;
510 if (index
== msg_num
- 1)
511 memcpy(&rss_cfg
->rss_hash_key
[hash_key_index
],
513 HCLGE_COMM_RSS_KEY_SIZE
- hash_key_index
);
515 memcpy(&rss_cfg
->rss_hash_key
[hash_key_index
],
516 &resp_msg
[0], HCLGEVF_RSS_MBX_RESP_LEN
);
522 static int hclgevf_get_rss(struct hnae3_handle
*handle
, u32
*indir
, u8
*key
,
525 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
526 struct hclge_comm_rss_cfg
*rss_cfg
= &hdev
->rss_cfg
;
529 if (hdev
->ae_dev
->dev_version
>= HNAE3_DEVICE_VERSION_V2
) {
530 hclge_comm_get_rss_hash_info(rss_cfg
, key
, hfunc
);
533 *hfunc
= ETH_RSS_HASH_TOP
;
535 ret
= hclgevf_get_rss_hash_key(hdev
);
538 memcpy(key
, rss_cfg
->rss_hash_key
,
539 HCLGE_COMM_RSS_KEY_SIZE
);
543 hclge_comm_get_rss_indir_tbl(rss_cfg
, indir
,
544 hdev
->ae_dev
->dev_specs
.rss_ind_tbl_size
);
549 static int hclgevf_set_rss(struct hnae3_handle
*handle
, const u32
*indir
,
550 const u8
*key
, const u8 hfunc
)
552 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
553 struct hclge_comm_rss_cfg
*rss_cfg
= &hdev
->rss_cfg
;
556 if (hdev
->ae_dev
->dev_version
>= HNAE3_DEVICE_VERSION_V2
) {
557 ret
= hclge_comm_set_rss_hash_key(rss_cfg
, &hdev
->hw
.hw
, key
,
563 /* update the shadow RSS table with user specified qids */
564 for (i
= 0; i
< hdev
->ae_dev
->dev_specs
.rss_ind_tbl_size
; i
++)
565 rss_cfg
->rss_indirection_tbl
[i
] = indir
[i
];
567 /* update the hardware */
568 return hclge_comm_set_rss_indir_table(hdev
->ae_dev
, &hdev
->hw
.hw
,
569 rss_cfg
->rss_indirection_tbl
);
572 static int hclgevf_set_rss_tuple(struct hnae3_handle
*handle
,
573 struct ethtool_rxnfc
*nfc
)
575 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
578 if (hdev
->ae_dev
->dev_version
< HNAE3_DEVICE_VERSION_V2
)
581 ret
= hclge_comm_set_rss_tuple(hdev
->ae_dev
, &hdev
->hw
.hw
,
582 &hdev
->rss_cfg
, nfc
);
584 dev_err(&hdev
->pdev
->dev
,
585 "failed to set rss tuple, ret = %d.\n", ret
);
590 static int hclgevf_get_rss_tuple(struct hnae3_handle
*handle
,
591 struct ethtool_rxnfc
*nfc
)
593 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
597 if (hdev
->ae_dev
->dev_version
< HNAE3_DEVICE_VERSION_V2
)
602 ret
= hclge_comm_get_rss_tuple(&hdev
->rss_cfg
, nfc
->flow_type
,
604 if (ret
|| !tuple_sets
)
607 nfc
->data
= hclge_comm_convert_rss_tuple(tuple_sets
);
612 static int hclgevf_get_tc_size(struct hnae3_handle
*handle
)
614 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
615 struct hclge_comm_rss_cfg
*rss_cfg
= &hdev
->rss_cfg
;
617 return rss_cfg
->rss_size
;
620 static int hclgevf_bind_ring_to_vector(struct hnae3_handle
*handle
, bool en
,
622 struct hnae3_ring_chain_node
*ring_chain
)
624 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
625 struct hclge_vf_to_pf_msg send_msg
;
626 struct hnae3_ring_chain_node
*node
;
630 memset(&send_msg
, 0, sizeof(send_msg
));
631 send_msg
.code
= en
? HCLGE_MBX_MAP_RING_TO_VECTOR
:
632 HCLGE_MBX_UNMAP_RING_TO_VECTOR
;
633 send_msg
.vector_id
= vector_id
;
635 for (node
= ring_chain
; node
; node
= node
->next
) {
636 send_msg
.param
[i
].ring_type
=
637 hnae3_get_bit(node
->flag
, HNAE3_RING_TYPE_B
);
639 send_msg
.param
[i
].tqp_index
= node
->tqp_index
;
640 send_msg
.param
[i
].int_gl_index
=
641 hnae3_get_field(node
->int_gl_idx
,
643 HNAE3_RING_GL_IDX_S
);
646 if (i
== HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM
|| !node
->next
) {
647 send_msg
.ring_num
= i
;
649 status
= hclgevf_send_mbx_msg(hdev
, &send_msg
, false,
652 dev_err(&hdev
->pdev
->dev
,
653 "Map TQP fail, status is %d.\n",
664 static int hclgevf_map_ring_to_vector(struct hnae3_handle
*handle
, int vector
,
665 struct hnae3_ring_chain_node
*ring_chain
)
667 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
670 vector_id
= hclgevf_get_vector_index(hdev
, vector
);
672 dev_err(&handle
->pdev
->dev
,
673 "Get vector index fail. ret =%d\n", vector_id
);
677 return hclgevf_bind_ring_to_vector(handle
, true, vector_id
, ring_chain
);
680 static int hclgevf_unmap_ring_from_vector(
681 struct hnae3_handle
*handle
,
683 struct hnae3_ring_chain_node
*ring_chain
)
685 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
688 if (test_bit(HCLGEVF_STATE_RST_HANDLING
, &hdev
->state
))
691 vector_id
= hclgevf_get_vector_index(hdev
, vector
);
693 dev_err(&handle
->pdev
->dev
,
694 "Get vector index fail. ret =%d\n", vector_id
);
698 ret
= hclgevf_bind_ring_to_vector(handle
, false, vector_id
, ring_chain
);
700 dev_err(&handle
->pdev
->dev
,
701 "Unmap ring from vector fail. vector=%d, ret =%d\n",
708 static int hclgevf_put_vector(struct hnae3_handle
*handle
, int vector
)
710 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
713 vector_id
= hclgevf_get_vector_index(hdev
, vector
);
715 dev_err(&handle
->pdev
->dev
,
716 "hclgevf_put_vector get vector index fail. ret =%d\n",
721 hclgevf_free_vector(hdev
, vector_id
);
726 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev
*hdev
,
727 bool en_uc_pmc
, bool en_mc_pmc
,
730 struct hnae3_handle
*handle
= &hdev
->nic
;
731 struct hclge_vf_to_pf_msg send_msg
;
734 memset(&send_msg
, 0, sizeof(send_msg
));
735 send_msg
.code
= HCLGE_MBX_SET_PROMISC_MODE
;
736 send_msg
.en_bc
= en_bc_pmc
? 1 : 0;
737 send_msg
.en_uc
= en_uc_pmc
? 1 : 0;
738 send_msg
.en_mc
= en_mc_pmc
? 1 : 0;
739 send_msg
.en_limit_promisc
= test_bit(HNAE3_PFLAG_LIMIT_PROMISC
,
740 &handle
->priv_flags
) ? 1 : 0;
742 ret
= hclgevf_send_mbx_msg(hdev
, &send_msg
, false, NULL
, 0);
744 dev_err(&hdev
->pdev
->dev
,
745 "Set promisc mode fail, status is %d.\n", ret
);
750 static int hclgevf_set_promisc_mode(struct hnae3_handle
*handle
, bool en_uc_pmc
,
753 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
756 en_bc_pmc
= hdev
->ae_dev
->dev_version
>= HNAE3_DEVICE_VERSION_V2
;
758 return hclgevf_cmd_set_promisc_mode(hdev
, en_uc_pmc
, en_mc_pmc
,
762 static void hclgevf_request_update_promisc_mode(struct hnae3_handle
*handle
)
764 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
766 set_bit(HCLGEVF_STATE_PROMISC_CHANGED
, &hdev
->state
);
767 hclgevf_task_schedule(hdev
, 0);
770 static void hclgevf_sync_promisc_mode(struct hclgevf_dev
*hdev
)
772 struct hnae3_handle
*handle
= &hdev
->nic
;
773 bool en_uc_pmc
= handle
->netdev_flags
& HNAE3_UPE
;
774 bool en_mc_pmc
= handle
->netdev_flags
& HNAE3_MPE
;
777 if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED
, &hdev
->state
)) {
778 ret
= hclgevf_set_promisc_mode(handle
, en_uc_pmc
, en_mc_pmc
);
780 clear_bit(HCLGEVF_STATE_PROMISC_CHANGED
, &hdev
->state
);
784 static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev
*hdev
, u16 tqp_id
,
785 u16 stream_id
, bool enable
)
787 struct hclgevf_cfg_com_tqp_queue_cmd
*req
;
788 struct hclge_desc desc
;
790 req
= (struct hclgevf_cfg_com_tqp_queue_cmd
*)desc
.data
;
792 hclgevf_cmd_setup_basic_desc(&desc
, HCLGE_OPC_CFG_COM_TQP_QUEUE
, false);
793 req
->tqp_id
= cpu_to_le16(tqp_id
& HCLGEVF_RING_ID_MASK
);
794 req
->stream_id
= cpu_to_le16(stream_id
);
796 req
->enable
|= 1U << HCLGEVF_TQP_ENABLE_B
;
798 return hclgevf_cmd_send(&hdev
->hw
, &desc
, 1);
801 static int hclgevf_tqp_enable(struct hnae3_handle
*handle
, bool enable
)
803 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
807 for (i
= 0; i
< handle
->kinfo
.num_tqps
; i
++) {
808 ret
= hclgevf_tqp_enable_cmd_send(hdev
, i
, 0, enable
);
816 static int hclgevf_get_host_mac_addr(struct hclgevf_dev
*hdev
, u8
*p
)
818 struct hclge_vf_to_pf_msg send_msg
;
819 u8 host_mac
[ETH_ALEN
];
822 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_GET_MAC_ADDR
, 0);
823 status
= hclgevf_send_mbx_msg(hdev
, &send_msg
, true, host_mac
,
826 dev_err(&hdev
->pdev
->dev
,
827 "fail to get VF MAC from host %d", status
);
831 ether_addr_copy(p
, host_mac
);
836 static void hclgevf_get_mac_addr(struct hnae3_handle
*handle
, u8
*p
)
838 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
839 u8 host_mac_addr
[ETH_ALEN
];
841 if (hclgevf_get_host_mac_addr(hdev
, host_mac_addr
))
844 hdev
->has_pf_mac
= !is_zero_ether_addr(host_mac_addr
);
845 if (hdev
->has_pf_mac
)
846 ether_addr_copy(p
, host_mac_addr
);
848 ether_addr_copy(p
, hdev
->hw
.mac
.mac_addr
);
851 static int hclgevf_set_mac_addr(struct hnae3_handle
*handle
, const void *p
,
854 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
855 u8
*old_mac_addr
= (u8
*)hdev
->hw
.mac
.mac_addr
;
856 struct hclge_vf_to_pf_msg send_msg
;
857 u8
*new_mac_addr
= (u8
*)p
;
860 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_SET_UNICAST
, 0);
861 send_msg
.subcode
= HCLGE_MBX_MAC_VLAN_UC_MODIFY
;
862 ether_addr_copy(send_msg
.data
, new_mac_addr
);
863 if (is_first
&& !hdev
->has_pf_mac
)
864 eth_zero_addr(&send_msg
.data
[ETH_ALEN
]);
866 ether_addr_copy(&send_msg
.data
[ETH_ALEN
], old_mac_addr
);
867 status
= hclgevf_send_mbx_msg(hdev
, &send_msg
, true, NULL
, 0);
869 ether_addr_copy(hdev
->hw
.mac
.mac_addr
, new_mac_addr
);
874 static struct hclgevf_mac_addr_node
*
875 hclgevf_find_mac_node(struct list_head
*list
, const u8
*mac_addr
)
877 struct hclgevf_mac_addr_node
*mac_node
, *tmp
;
879 list_for_each_entry_safe(mac_node
, tmp
, list
, node
)
880 if (ether_addr_equal(mac_addr
, mac_node
->mac_addr
))
886 static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node
*mac_node
,
887 enum HCLGEVF_MAC_NODE_STATE state
)
890 /* from set_rx_mode or tmp_add_list */
891 case HCLGEVF_MAC_TO_ADD
:
892 if (mac_node
->state
== HCLGEVF_MAC_TO_DEL
)
893 mac_node
->state
= HCLGEVF_MAC_ACTIVE
;
895 /* only from set_rx_mode */
896 case HCLGEVF_MAC_TO_DEL
:
897 if (mac_node
->state
== HCLGEVF_MAC_TO_ADD
) {
898 list_del(&mac_node
->node
);
901 mac_node
->state
= HCLGEVF_MAC_TO_DEL
;
904 /* only from tmp_add_list, the mac_node->state won't be
907 case HCLGEVF_MAC_ACTIVE
:
908 if (mac_node
->state
== HCLGEVF_MAC_TO_ADD
)
909 mac_node
->state
= HCLGEVF_MAC_ACTIVE
;
914 static int hclgevf_update_mac_list(struct hnae3_handle
*handle
,
915 enum HCLGEVF_MAC_NODE_STATE state
,
916 enum HCLGEVF_MAC_ADDR_TYPE mac_type
,
917 const unsigned char *addr
)
919 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
920 struct hclgevf_mac_addr_node
*mac_node
;
921 struct list_head
*list
;
923 list
= (mac_type
== HCLGEVF_MAC_ADDR_UC
) ?
924 &hdev
->mac_table
.uc_mac_list
: &hdev
->mac_table
.mc_mac_list
;
926 spin_lock_bh(&hdev
->mac_table
.mac_list_lock
);
928 /* if the mac addr is already in the mac list, no need to add a new
929 * one into it, just check the mac addr state, convert it to a new
930 * state, or just remove it, or do nothing.
932 mac_node
= hclgevf_find_mac_node(list
, addr
);
934 hclgevf_update_mac_node(mac_node
, state
);
935 spin_unlock_bh(&hdev
->mac_table
.mac_list_lock
);
938 /* if this address is never added, unnecessary to delete */
939 if (state
== HCLGEVF_MAC_TO_DEL
) {
940 spin_unlock_bh(&hdev
->mac_table
.mac_list_lock
);
944 mac_node
= kzalloc(sizeof(*mac_node
), GFP_ATOMIC
);
946 spin_unlock_bh(&hdev
->mac_table
.mac_list_lock
);
950 mac_node
->state
= state
;
951 ether_addr_copy(mac_node
->mac_addr
, addr
);
952 list_add_tail(&mac_node
->node
, list
);
954 spin_unlock_bh(&hdev
->mac_table
.mac_list_lock
);
958 static int hclgevf_add_uc_addr(struct hnae3_handle
*handle
,
959 const unsigned char *addr
)
961 return hclgevf_update_mac_list(handle
, HCLGEVF_MAC_TO_ADD
,
962 HCLGEVF_MAC_ADDR_UC
, addr
);
965 static int hclgevf_rm_uc_addr(struct hnae3_handle
*handle
,
966 const unsigned char *addr
)
968 return hclgevf_update_mac_list(handle
, HCLGEVF_MAC_TO_DEL
,
969 HCLGEVF_MAC_ADDR_UC
, addr
);
972 static int hclgevf_add_mc_addr(struct hnae3_handle
*handle
,
973 const unsigned char *addr
)
975 return hclgevf_update_mac_list(handle
, HCLGEVF_MAC_TO_ADD
,
976 HCLGEVF_MAC_ADDR_MC
, addr
);
979 static int hclgevf_rm_mc_addr(struct hnae3_handle
*handle
,
980 const unsigned char *addr
)
982 return hclgevf_update_mac_list(handle
, HCLGEVF_MAC_TO_DEL
,
983 HCLGEVF_MAC_ADDR_MC
, addr
);
986 static int hclgevf_add_del_mac_addr(struct hclgevf_dev
*hdev
,
987 struct hclgevf_mac_addr_node
*mac_node
,
988 enum HCLGEVF_MAC_ADDR_TYPE mac_type
)
990 struct hclge_vf_to_pf_msg send_msg
;
993 if (mac_type
== HCLGEVF_MAC_ADDR_UC
) {
994 code
= HCLGE_MBX_SET_UNICAST
;
995 if (mac_node
->state
== HCLGEVF_MAC_TO_ADD
)
996 subcode
= HCLGE_MBX_MAC_VLAN_UC_ADD
;
998 subcode
= HCLGE_MBX_MAC_VLAN_UC_REMOVE
;
1000 code
= HCLGE_MBX_SET_MULTICAST
;
1001 if (mac_node
->state
== HCLGEVF_MAC_TO_ADD
)
1002 subcode
= HCLGE_MBX_MAC_VLAN_MC_ADD
;
1004 subcode
= HCLGE_MBX_MAC_VLAN_MC_REMOVE
;
1007 hclgevf_build_send_msg(&send_msg
, code
, subcode
);
1008 ether_addr_copy(send_msg
.data
, mac_node
->mac_addr
);
1009 return hclgevf_send_mbx_msg(hdev
, &send_msg
, false, NULL
, 0);
1012 static void hclgevf_config_mac_list(struct hclgevf_dev
*hdev
,
1013 struct list_head
*list
,
1014 enum HCLGEVF_MAC_ADDR_TYPE mac_type
)
1016 char format_mac_addr
[HNAE3_FORMAT_MAC_ADDR_LEN
];
1017 struct hclgevf_mac_addr_node
*mac_node
, *tmp
;
1020 list_for_each_entry_safe(mac_node
, tmp
, list
, node
) {
1021 ret
= hclgevf_add_del_mac_addr(hdev
, mac_node
, mac_type
);
1023 hnae3_format_mac_addr(format_mac_addr
,
1024 mac_node
->mac_addr
);
1025 dev_err(&hdev
->pdev
->dev
,
1026 "failed to configure mac %s, state = %d, ret = %d\n",
1027 format_mac_addr
, mac_node
->state
, ret
);
1030 if (mac_node
->state
== HCLGEVF_MAC_TO_ADD
) {
1031 mac_node
->state
= HCLGEVF_MAC_ACTIVE
;
1033 list_del(&mac_node
->node
);
1039 static void hclgevf_sync_from_add_list(struct list_head
*add_list
,
1040 struct list_head
*mac_list
)
1042 struct hclgevf_mac_addr_node
*mac_node
, *tmp
, *new_node
;
1044 list_for_each_entry_safe(mac_node
, tmp
, add_list
, node
) {
1045 /* if the mac address from tmp_add_list is not in the
1046 * uc/mc_mac_list, it means have received a TO_DEL request
1047 * during the time window of sending mac config request to PF
1048 * If mac_node state is ACTIVE, then change its state to TO_DEL,
1049 * then it will be removed at next time. If is TO_ADD, it means
1050 * send TO_ADD request failed, so just remove the mac node.
1052 new_node
= hclgevf_find_mac_node(mac_list
, mac_node
->mac_addr
);
1054 hclgevf_update_mac_node(new_node
, mac_node
->state
);
1055 list_del(&mac_node
->node
);
1057 } else if (mac_node
->state
== HCLGEVF_MAC_ACTIVE
) {
1058 mac_node
->state
= HCLGEVF_MAC_TO_DEL
;
1059 list_move_tail(&mac_node
->node
, mac_list
);
1061 list_del(&mac_node
->node
);
1067 static void hclgevf_sync_from_del_list(struct list_head
*del_list
,
1068 struct list_head
*mac_list
)
1070 struct hclgevf_mac_addr_node
*mac_node
, *tmp
, *new_node
;
1072 list_for_each_entry_safe(mac_node
, tmp
, del_list
, node
) {
1073 new_node
= hclgevf_find_mac_node(mac_list
, mac_node
->mac_addr
);
1075 /* If the mac addr is exist in the mac list, it means
1076 * received a new request TO_ADD during the time window
1077 * of sending mac addr configurrequest to PF, so just
1078 * change the mac state to ACTIVE.
1080 new_node
->state
= HCLGEVF_MAC_ACTIVE
;
1081 list_del(&mac_node
->node
);
1084 list_move_tail(&mac_node
->node
, mac_list
);
1089 static void hclgevf_clear_list(struct list_head
*list
)
1091 struct hclgevf_mac_addr_node
*mac_node
, *tmp
;
1093 list_for_each_entry_safe(mac_node
, tmp
, list
, node
) {
1094 list_del(&mac_node
->node
);
1099 static void hclgevf_sync_mac_list(struct hclgevf_dev
*hdev
,
1100 enum HCLGEVF_MAC_ADDR_TYPE mac_type
)
1102 struct hclgevf_mac_addr_node
*mac_node
, *tmp
, *new_node
;
1103 struct list_head tmp_add_list
, tmp_del_list
;
1104 struct list_head
*list
;
1106 INIT_LIST_HEAD(&tmp_add_list
);
1107 INIT_LIST_HEAD(&tmp_del_list
);
1109 /* move the mac addr to the tmp_add_list and tmp_del_list, then
1110 * we can add/delete these mac addr outside the spin lock
1112 list
= (mac_type
== HCLGEVF_MAC_ADDR_UC
) ?
1113 &hdev
->mac_table
.uc_mac_list
: &hdev
->mac_table
.mc_mac_list
;
1115 spin_lock_bh(&hdev
->mac_table
.mac_list_lock
);
1117 list_for_each_entry_safe(mac_node
, tmp
, list
, node
) {
1118 switch (mac_node
->state
) {
1119 case HCLGEVF_MAC_TO_DEL
:
1120 list_move_tail(&mac_node
->node
, &tmp_del_list
);
1122 case HCLGEVF_MAC_TO_ADD
:
1123 new_node
= kzalloc(sizeof(*new_node
), GFP_ATOMIC
);
1127 ether_addr_copy(new_node
->mac_addr
, mac_node
->mac_addr
);
1128 new_node
->state
= mac_node
->state
;
1129 list_add_tail(&new_node
->node
, &tmp_add_list
);
1137 spin_unlock_bh(&hdev
->mac_table
.mac_list_lock
);
1139 /* delete first, in order to get max mac table space for adding */
1140 hclgevf_config_mac_list(hdev
, &tmp_del_list
, mac_type
);
1141 hclgevf_config_mac_list(hdev
, &tmp_add_list
, mac_type
);
1143 /* if some mac addresses were added/deleted fail, move back to the
1144 * mac_list, and retry at next time.
1146 spin_lock_bh(&hdev
->mac_table
.mac_list_lock
);
1148 hclgevf_sync_from_del_list(&tmp_del_list
, list
);
1149 hclgevf_sync_from_add_list(&tmp_add_list
, list
);
1151 spin_unlock_bh(&hdev
->mac_table
.mac_list_lock
);
1154 static void hclgevf_sync_mac_table(struct hclgevf_dev
*hdev
)
1156 hclgevf_sync_mac_list(hdev
, HCLGEVF_MAC_ADDR_UC
);
1157 hclgevf_sync_mac_list(hdev
, HCLGEVF_MAC_ADDR_MC
);
1160 static void hclgevf_uninit_mac_list(struct hclgevf_dev
*hdev
)
1162 spin_lock_bh(&hdev
->mac_table
.mac_list_lock
);
1164 hclgevf_clear_list(&hdev
->mac_table
.uc_mac_list
);
1165 hclgevf_clear_list(&hdev
->mac_table
.mc_mac_list
);
1167 spin_unlock_bh(&hdev
->mac_table
.mac_list_lock
);
1170 static int hclgevf_enable_vlan_filter(struct hnae3_handle
*handle
, bool enable
)
1172 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1173 struct hnae3_ae_dev
*ae_dev
= hdev
->ae_dev
;
1174 struct hclge_vf_to_pf_msg send_msg
;
1176 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B
, ae_dev
->caps
))
1179 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_SET_VLAN
,
1180 HCLGE_MBX_ENABLE_VLAN_FILTER
);
1181 send_msg
.data
[0] = enable
? 1 : 0;
1183 return hclgevf_send_mbx_msg(hdev
, &send_msg
, true, NULL
, 0);
1186 static int hclgevf_set_vlan_filter(struct hnae3_handle
*handle
,
1187 __be16 proto
, u16 vlan_id
,
1190 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1191 struct hclge_mbx_vlan_filter
*vlan_filter
;
1192 struct hclge_vf_to_pf_msg send_msg
;
1195 if (vlan_id
> HCLGEVF_MAX_VLAN_ID
)
1198 if (proto
!= htons(ETH_P_8021Q
))
1199 return -EPROTONOSUPPORT
;
1201 /* When device is resetting or reset failed, firmware is unable to
1202 * handle mailbox. Just record the vlan id, and remove it after
1205 if ((test_bit(HCLGEVF_STATE_RST_HANDLING
, &hdev
->state
) ||
1206 test_bit(HCLGEVF_STATE_RST_FAIL
, &hdev
->state
)) && is_kill
) {
1207 set_bit(vlan_id
, hdev
->vlan_del_fail_bmap
);
1211 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_SET_VLAN
,
1212 HCLGE_MBX_VLAN_FILTER
);
1213 vlan_filter
= (struct hclge_mbx_vlan_filter
*)send_msg
.data
;
1214 vlan_filter
->is_kill
= is_kill
;
1215 vlan_filter
->vlan_id
= cpu_to_le16(vlan_id
);
1216 vlan_filter
->proto
= cpu_to_le16(be16_to_cpu(proto
));
1218 /* when remove hw vlan filter failed, record the vlan id,
1219 * and try to remove it from hw later, to be consistence
1222 ret
= hclgevf_send_mbx_msg(hdev
, &send_msg
, true, NULL
, 0);
1224 set_bit(vlan_id
, hdev
->vlan_del_fail_bmap
);
1229 static void hclgevf_sync_vlan_filter(struct hclgevf_dev
*hdev
)
1231 #define HCLGEVF_MAX_SYNC_COUNT 60
1232 struct hnae3_handle
*handle
= &hdev
->nic
;
1233 int ret
, sync_cnt
= 0;
1236 vlan_id
= find_first_bit(hdev
->vlan_del_fail_bmap
, VLAN_N_VID
);
1237 while (vlan_id
!= VLAN_N_VID
) {
1238 ret
= hclgevf_set_vlan_filter(handle
, htons(ETH_P_8021Q
),
1243 clear_bit(vlan_id
, hdev
->vlan_del_fail_bmap
);
1245 if (sync_cnt
>= HCLGEVF_MAX_SYNC_COUNT
)
1248 vlan_id
= find_first_bit(hdev
->vlan_del_fail_bmap
, VLAN_N_VID
);
1252 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle
*handle
, bool enable
)
1254 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1255 struct hclge_vf_to_pf_msg send_msg
;
1257 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_SET_VLAN
,
1258 HCLGE_MBX_VLAN_RX_OFF_CFG
);
1259 send_msg
.data
[0] = enable
? 1 : 0;
1260 return hclgevf_send_mbx_msg(hdev
, &send_msg
, false, NULL
, 0);
1263 static int hclgevf_reset_tqp(struct hnae3_handle
*handle
)
1265 #define HCLGEVF_RESET_ALL_QUEUE_DONE 1U
1266 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1267 struct hclge_vf_to_pf_msg send_msg
;
1268 u8 return_status
= 0;
1272 /* disable vf queue before send queue reset msg to PF */
1273 ret
= hclgevf_tqp_enable(handle
, false);
1275 dev_err(&hdev
->pdev
->dev
, "failed to disable tqp, ret = %d\n",
1280 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_QUEUE_RESET
, 0);
1282 ret
= hclgevf_send_mbx_msg(hdev
, &send_msg
, true, &return_status
,
1283 sizeof(return_status
));
1284 if (ret
|| return_status
== HCLGEVF_RESET_ALL_QUEUE_DONE
)
1287 for (i
= 1; i
< handle
->kinfo
.num_tqps
; i
++) {
1288 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_QUEUE_RESET
, 0);
1289 *(__le16
*)send_msg
.data
= cpu_to_le16(i
);
1290 ret
= hclgevf_send_mbx_msg(hdev
, &send_msg
, true, NULL
, 0);
1298 static int hclgevf_set_mtu(struct hnae3_handle
*handle
, int new_mtu
)
1300 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1301 struct hclge_mbx_mtu_info
*mtu_info
;
1302 struct hclge_vf_to_pf_msg send_msg
;
1304 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_SET_MTU
, 0);
1305 mtu_info
= (struct hclge_mbx_mtu_info
*)send_msg
.data
;
1306 mtu_info
->mtu
= cpu_to_le32(new_mtu
);
1308 return hclgevf_send_mbx_msg(hdev
, &send_msg
, true, NULL
, 0);
1311 static int hclgevf_notify_client(struct hclgevf_dev
*hdev
,
1312 enum hnae3_reset_notify_type type
)
1314 struct hnae3_client
*client
= hdev
->nic_client
;
1315 struct hnae3_handle
*handle
= &hdev
->nic
;
1318 if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED
, &hdev
->state
) ||
1322 if (!client
->ops
->reset_notify
)
1325 ret
= client
->ops
->reset_notify(handle
, type
);
1327 dev_err(&hdev
->pdev
->dev
, "notify nic client failed %d(%d)\n",
1333 static int hclgevf_notify_roce_client(struct hclgevf_dev
*hdev
,
1334 enum hnae3_reset_notify_type type
)
1336 struct hnae3_client
*client
= hdev
->roce_client
;
1337 struct hnae3_handle
*handle
= &hdev
->roce
;
1340 if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED
, &hdev
->state
) || !client
)
1343 if (!client
->ops
->reset_notify
)
1346 ret
= client
->ops
->reset_notify(handle
, type
);
1348 dev_err(&hdev
->pdev
->dev
, "notify roce client failed %d(%d)",
1353 static int hclgevf_reset_wait(struct hclgevf_dev
*hdev
)
1355 #define HCLGEVF_RESET_WAIT_US 20000
1356 #define HCLGEVF_RESET_WAIT_CNT 2000
1357 #define HCLGEVF_RESET_WAIT_TIMEOUT_US \
1358 (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)
1363 if (hdev
->reset_type
== HNAE3_VF_RESET
)
1364 ret
= readl_poll_timeout(hdev
->hw
.hw
.io_base
+
1365 HCLGEVF_VF_RST_ING
, val
,
1366 !(val
& HCLGEVF_VF_RST_ING_BIT
),
1367 HCLGEVF_RESET_WAIT_US
,
1368 HCLGEVF_RESET_WAIT_TIMEOUT_US
);
1370 ret
= readl_poll_timeout(hdev
->hw
.hw
.io_base
+
1371 HCLGEVF_RST_ING
, val
,
1372 !(val
& HCLGEVF_RST_ING_BITS
),
1373 HCLGEVF_RESET_WAIT_US
,
1374 HCLGEVF_RESET_WAIT_TIMEOUT_US
);
1376 /* hardware completion status should be available by this time */
1378 dev_err(&hdev
->pdev
->dev
,
1379 "couldn't get reset done status from h/w, timeout!\n");
1383 /* we will wait a bit more to let reset of the stack to complete. This
1384 * might happen in case reset assertion was made by PF. Yes, this also
1385 * means we might end up waiting bit more even for VF reset.
1387 if (hdev
->reset_type
== HNAE3_VF_FULL_RESET
)
1395 static void hclgevf_reset_handshake(struct hclgevf_dev
*hdev
, bool enable
)
1399 reg_val
= hclgevf_read_dev(&hdev
->hw
, HCLGE_COMM_NIC_CSQ_DEPTH_REG
);
1401 reg_val
|= HCLGEVF_NIC_SW_RST_RDY
;
1403 reg_val
&= ~HCLGEVF_NIC_SW_RST_RDY
;
1405 hclgevf_write_dev(&hdev
->hw
, HCLGE_COMM_NIC_CSQ_DEPTH_REG
,
1409 static int hclgevf_reset_stack(struct hclgevf_dev
*hdev
)
1413 /* uninitialize the nic client */
1414 ret
= hclgevf_notify_client(hdev
, HNAE3_UNINIT_CLIENT
);
1418 /* re-initialize the hclge device */
1419 ret
= hclgevf_reset_hdev(hdev
);
1421 dev_err(&hdev
->pdev
->dev
,
1422 "hclge device re-init failed, VF is disabled!\n");
1426 /* bring up the nic client again */
1427 ret
= hclgevf_notify_client(hdev
, HNAE3_INIT_CLIENT
);
1431 /* clear handshake status with IMP */
1432 hclgevf_reset_handshake(hdev
, false);
1434 /* bring up the nic to enable TX/RX again */
1435 return hclgevf_notify_client(hdev
, HNAE3_UP_CLIENT
);
1438 static int hclgevf_reset_prepare_wait(struct hclgevf_dev
*hdev
)
1440 #define HCLGEVF_RESET_SYNC_TIME 100
1442 if (hdev
->reset_type
== HNAE3_VF_FUNC_RESET
) {
1443 struct hclge_vf_to_pf_msg send_msg
;
1446 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_RESET
, 0);
1447 ret
= hclgevf_send_mbx_msg(hdev
, &send_msg
, true, NULL
, 0);
1449 dev_err(&hdev
->pdev
->dev
,
1450 "failed to assert VF reset, ret = %d\n", ret
);
1453 hdev
->rst_stats
.vf_func_rst_cnt
++;
1456 set_bit(HCLGE_COMM_STATE_CMD_DISABLE
, &hdev
->hw
.hw
.comm_state
);
1457 /* inform hardware that preparatory work is done */
1458 msleep(HCLGEVF_RESET_SYNC_TIME
);
1459 hclgevf_reset_handshake(hdev
, true);
1460 dev_info(&hdev
->pdev
->dev
, "prepare reset(%d) wait done\n",
1466 static void hclgevf_dump_rst_info(struct hclgevf_dev
*hdev
)
1468 dev_info(&hdev
->pdev
->dev
, "VF function reset count: %u\n",
1469 hdev
->rst_stats
.vf_func_rst_cnt
);
1470 dev_info(&hdev
->pdev
->dev
, "FLR reset count: %u\n",
1471 hdev
->rst_stats
.flr_rst_cnt
);
1472 dev_info(&hdev
->pdev
->dev
, "VF reset count: %u\n",
1473 hdev
->rst_stats
.vf_rst_cnt
);
1474 dev_info(&hdev
->pdev
->dev
, "reset done count: %u\n",
1475 hdev
->rst_stats
.rst_done_cnt
);
1476 dev_info(&hdev
->pdev
->dev
, "HW reset done count: %u\n",
1477 hdev
->rst_stats
.hw_rst_done_cnt
);
1478 dev_info(&hdev
->pdev
->dev
, "reset count: %u\n",
1479 hdev
->rst_stats
.rst_cnt
);
1480 dev_info(&hdev
->pdev
->dev
, "reset fail count: %u\n",
1481 hdev
->rst_stats
.rst_fail_cnt
);
1482 dev_info(&hdev
->pdev
->dev
, "vector0 interrupt enable status: 0x%x\n",
1483 hclgevf_read_dev(&hdev
->hw
, HCLGEVF_MISC_VECTOR_REG_BASE
));
1484 dev_info(&hdev
->pdev
->dev
, "vector0 interrupt status: 0x%x\n",
1485 hclgevf_read_dev(&hdev
->hw
, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG
));
1486 dev_info(&hdev
->pdev
->dev
, "handshake status: 0x%x\n",
1487 hclgevf_read_dev(&hdev
->hw
, HCLGE_COMM_NIC_CSQ_DEPTH_REG
));
1488 dev_info(&hdev
->pdev
->dev
, "function reset status: 0x%x\n",
1489 hclgevf_read_dev(&hdev
->hw
, HCLGEVF_RST_ING
));
1490 dev_info(&hdev
->pdev
->dev
, "hdev state: 0x%lx\n", hdev
->state
);
1493 static void hclgevf_reset_err_handle(struct hclgevf_dev
*hdev
)
1495 /* recover handshake status with IMP when reset fail */
1496 hclgevf_reset_handshake(hdev
, true);
1497 hdev
->rst_stats
.rst_fail_cnt
++;
1498 dev_err(&hdev
->pdev
->dev
, "failed to reset VF(%u)\n",
1499 hdev
->rst_stats
.rst_fail_cnt
);
1501 if (hdev
->rst_stats
.rst_fail_cnt
< HCLGEVF_RESET_MAX_FAIL_CNT
)
1502 set_bit(hdev
->reset_type
, &hdev
->reset_pending
);
1504 if (hclgevf_is_reset_pending(hdev
)) {
1505 set_bit(HCLGEVF_RESET_PENDING
, &hdev
->reset_state
);
1506 hclgevf_reset_task_schedule(hdev
);
1508 set_bit(HCLGEVF_STATE_RST_FAIL
, &hdev
->state
);
1509 hclgevf_dump_rst_info(hdev
);
1513 static int hclgevf_reset_prepare(struct hclgevf_dev
*hdev
)
1517 hdev
->rst_stats
.rst_cnt
++;
1519 /* perform reset of the stack & ae device for a client */
1520 ret
= hclgevf_notify_roce_client(hdev
, HNAE3_DOWN_CLIENT
);
1525 /* bring down the nic to stop any ongoing TX/RX */
1526 ret
= hclgevf_notify_client(hdev
, HNAE3_DOWN_CLIENT
);
1531 return hclgevf_reset_prepare_wait(hdev
);
1534 static int hclgevf_reset_rebuild(struct hclgevf_dev
*hdev
)
1538 hdev
->rst_stats
.hw_rst_done_cnt
++;
1539 ret
= hclgevf_notify_roce_client(hdev
, HNAE3_UNINIT_CLIENT
);
1544 /* now, re-initialize the nic client and ae device */
1545 ret
= hclgevf_reset_stack(hdev
);
1548 dev_err(&hdev
->pdev
->dev
, "failed to reset VF stack\n");
1552 ret
= hclgevf_notify_roce_client(hdev
, HNAE3_INIT_CLIENT
);
1553 /* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1
1557 hdev
->rst_stats
.rst_fail_cnt
< HCLGEVF_RESET_MAX_FAIL_CNT
- 1)
1560 ret
= hclgevf_notify_roce_client(hdev
, HNAE3_UP_CLIENT
);
1564 hdev
->last_reset_time
= jiffies
;
1565 hdev
->rst_stats
.rst_done_cnt
++;
1566 hdev
->rst_stats
.rst_fail_cnt
= 0;
1567 clear_bit(HCLGEVF_STATE_RST_FAIL
, &hdev
->state
);
1572 static void hclgevf_reset(struct hclgevf_dev
*hdev
)
1574 if (hclgevf_reset_prepare(hdev
))
1577 /* check if VF could successfully fetch the hardware reset completion
1578 * status from the hardware
1580 if (hclgevf_reset_wait(hdev
)) {
1581 /* can't do much in this situation, will disable VF */
1582 dev_err(&hdev
->pdev
->dev
,
1583 "failed to fetch H/W reset completion status\n");
1587 if (hclgevf_reset_rebuild(hdev
))
1593 hclgevf_reset_err_handle(hdev
);
1596 static enum hnae3_reset_type
hclgevf_get_reset_level(unsigned long *addr
)
1598 enum hnae3_reset_type rst_level
= HNAE3_NONE_RESET
;
1600 /* return the highest priority reset level amongst all */
1601 if (test_bit(HNAE3_VF_RESET
, addr
)) {
1602 rst_level
= HNAE3_VF_RESET
;
1603 clear_bit(HNAE3_VF_RESET
, addr
);
1604 clear_bit(HNAE3_VF_PF_FUNC_RESET
, addr
);
1605 clear_bit(HNAE3_VF_FUNC_RESET
, addr
);
1606 } else if (test_bit(HNAE3_VF_FULL_RESET
, addr
)) {
1607 rst_level
= HNAE3_VF_FULL_RESET
;
1608 clear_bit(HNAE3_VF_FULL_RESET
, addr
);
1609 clear_bit(HNAE3_VF_FUNC_RESET
, addr
);
1610 } else if (test_bit(HNAE3_VF_PF_FUNC_RESET
, addr
)) {
1611 rst_level
= HNAE3_VF_PF_FUNC_RESET
;
1612 clear_bit(HNAE3_VF_PF_FUNC_RESET
, addr
);
1613 clear_bit(HNAE3_VF_FUNC_RESET
, addr
);
1614 } else if (test_bit(HNAE3_VF_FUNC_RESET
, addr
)) {
1615 rst_level
= HNAE3_VF_FUNC_RESET
;
1616 clear_bit(HNAE3_VF_FUNC_RESET
, addr
);
1617 } else if (test_bit(HNAE3_FLR_RESET
, addr
)) {
1618 rst_level
= HNAE3_FLR_RESET
;
1619 clear_bit(HNAE3_FLR_RESET
, addr
);
1625 static void hclgevf_reset_event(struct pci_dev
*pdev
,
1626 struct hnae3_handle
*handle
)
1628 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
1629 struct hclgevf_dev
*hdev
= ae_dev
->priv
;
1631 dev_info(&hdev
->pdev
->dev
, "received reset request from VF enet\n");
1633 if (hdev
->default_reset_request
)
1635 hclgevf_get_reset_level(&hdev
->default_reset_request
);
1637 hdev
->reset_level
= HNAE3_VF_FUNC_RESET
;
1639 /* reset of this VF requested */
1640 set_bit(HCLGEVF_RESET_REQUESTED
, &hdev
->reset_state
);
1641 hclgevf_reset_task_schedule(hdev
);
1643 hdev
->last_reset_time
= jiffies
;
1646 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev
*ae_dev
,
1647 enum hnae3_reset_type rst_type
)
1649 struct hclgevf_dev
*hdev
= ae_dev
->priv
;
1651 set_bit(rst_type
, &hdev
->default_reset_request
);
1654 static void hclgevf_enable_vector(struct hclgevf_misc_vector
*vector
, bool en
)
1656 writel(en
? 1 : 0, vector
->addr
);
1659 static void hclgevf_reset_prepare_general(struct hnae3_ae_dev
*ae_dev
,
1660 enum hnae3_reset_type rst_type
)
1662 #define HCLGEVF_RESET_RETRY_WAIT_MS 500
1663 #define HCLGEVF_RESET_RETRY_CNT 5
1665 struct hclgevf_dev
*hdev
= ae_dev
->priv
;
1669 while (retry_cnt
++ < HCLGEVF_RESET_RETRY_CNT
) {
1670 down(&hdev
->reset_sem
);
1671 set_bit(HCLGEVF_STATE_RST_HANDLING
, &hdev
->state
);
1672 hdev
->reset_type
= rst_type
;
1673 ret
= hclgevf_reset_prepare(hdev
);
1674 if (!ret
&& !hdev
->reset_pending
)
1677 dev_err(&hdev
->pdev
->dev
,
1678 "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
1679 ret
, hdev
->reset_pending
, retry_cnt
);
1680 clear_bit(HCLGEVF_STATE_RST_HANDLING
, &hdev
->state
);
1681 up(&hdev
->reset_sem
);
1682 msleep(HCLGEVF_RESET_RETRY_WAIT_MS
);
1685 /* disable misc vector before reset done */
1686 hclgevf_enable_vector(&hdev
->misc_vector
, false);
1688 if (hdev
->reset_type
== HNAE3_FLR_RESET
)
1689 hdev
->rst_stats
.flr_rst_cnt
++;
1692 static void hclgevf_reset_done(struct hnae3_ae_dev
*ae_dev
)
1694 struct hclgevf_dev
*hdev
= ae_dev
->priv
;
1697 hclgevf_enable_vector(&hdev
->misc_vector
, true);
1699 ret
= hclgevf_reset_rebuild(hdev
);
1701 dev_warn(&hdev
->pdev
->dev
, "fail to rebuild, ret=%d\n",
1704 hdev
->reset_type
= HNAE3_NONE_RESET
;
1705 clear_bit(HCLGEVF_STATE_RST_HANDLING
, &hdev
->state
);
1706 up(&hdev
->reset_sem
);
1709 static u32
hclgevf_get_fw_version(struct hnae3_handle
*handle
)
1711 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1713 return hdev
->fw_version
;
1716 static void hclgevf_get_misc_vector(struct hclgevf_dev
*hdev
)
1718 struct hclgevf_misc_vector
*vector
= &hdev
->misc_vector
;
1720 vector
->vector_irq
= pci_irq_vector(hdev
->pdev
,
1721 HCLGEVF_MISC_VECTOR_NUM
);
1722 vector
->addr
= hdev
->hw
.hw
.io_base
+ HCLGEVF_MISC_VECTOR_REG_BASE
;
1723 /* vector status always valid for Vector 0 */
1724 hdev
->vector_status
[HCLGEVF_MISC_VECTOR_NUM
] = 0;
1725 hdev
->vector_irq
[HCLGEVF_MISC_VECTOR_NUM
] = vector
->vector_irq
;
1727 hdev
->num_msi_left
-= 1;
1728 hdev
->num_msi_used
+= 1;
1731 void hclgevf_reset_task_schedule(struct hclgevf_dev
*hdev
)
1733 if (!test_bit(HCLGEVF_STATE_REMOVING
, &hdev
->state
) &&
1734 test_bit(HCLGEVF_STATE_SERVICE_INITED
, &hdev
->state
) &&
1735 !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED
,
1737 mod_delayed_work(hclgevf_wq
, &hdev
->service_task
, 0);
1740 void hclgevf_mbx_task_schedule(struct hclgevf_dev
*hdev
)
1742 if (!test_bit(HCLGEVF_STATE_REMOVING
, &hdev
->state
) &&
1743 !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED
,
1745 mod_delayed_work(hclgevf_wq
, &hdev
->service_task
, 0);
1748 static void hclgevf_task_schedule(struct hclgevf_dev
*hdev
,
1749 unsigned long delay
)
1751 if (!test_bit(HCLGEVF_STATE_REMOVING
, &hdev
->state
) &&
1752 !test_bit(HCLGEVF_STATE_RST_FAIL
, &hdev
->state
))
1753 mod_delayed_work(hclgevf_wq
, &hdev
->service_task
, delay
);
1756 static void hclgevf_reset_service_task(struct hclgevf_dev
*hdev
)
1758 #define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3
1760 if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED
, &hdev
->state
))
1763 down(&hdev
->reset_sem
);
1764 set_bit(HCLGEVF_STATE_RST_HANDLING
, &hdev
->state
);
1766 if (test_and_clear_bit(HCLGEVF_RESET_PENDING
,
1767 &hdev
->reset_state
)) {
1768 /* PF has intimated that it is about to reset the hardware.
1769 * We now have to poll & check if hardware has actually
1770 * completed the reset sequence. On hardware reset completion,
1771 * VF needs to reset the client and ae device.
1773 hdev
->reset_attempts
= 0;
1775 hdev
->last_reset_time
= jiffies
;
1777 hclgevf_get_reset_level(&hdev
->reset_pending
);
1778 if (hdev
->reset_type
!= HNAE3_NONE_RESET
)
1779 hclgevf_reset(hdev
);
1780 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED
,
1781 &hdev
->reset_state
)) {
1782 /* we could be here when either of below happens:
1783 * 1. reset was initiated due to watchdog timeout caused by
1784 * a. IMP was earlier reset and our TX got choked down and
1785 * which resulted in watchdog reacting and inducing VF
1786 * reset. This also means our cmdq would be unreliable.
1787 * b. problem in TX due to other lower layer(example link
1788 * layer not functioning properly etc.)
1789 * 2. VF reset might have been initiated due to some config
1792 * NOTE: Theres no clear way to detect above cases than to react
1793 * to the response of PF for this reset request. PF will ack the
1794 * 1b and 2. cases but we will not get any intimation about 1a
1795 * from PF as cmdq would be in unreliable state i.e. mailbox
1796 * communication between PF and VF would be broken.
1798 * if we are never geting into pending state it means either:
1799 * 1. PF is not receiving our request which could be due to IMP
1802 * We cannot do much for 2. but to check first we can try reset
1803 * our PCIe + stack and see if it alleviates the problem.
1805 if (hdev
->reset_attempts
> HCLGEVF_MAX_RESET_ATTEMPTS_CNT
) {
1806 /* prepare for full reset of stack + pcie interface */
1807 set_bit(HNAE3_VF_FULL_RESET
, &hdev
->reset_pending
);
1809 /* "defer" schedule the reset task again */
1810 set_bit(HCLGEVF_RESET_PENDING
, &hdev
->reset_state
);
1812 hdev
->reset_attempts
++;
1814 set_bit(hdev
->reset_level
, &hdev
->reset_pending
);
1815 set_bit(HCLGEVF_RESET_PENDING
, &hdev
->reset_state
);
1817 hclgevf_reset_task_schedule(hdev
);
1820 hdev
->reset_type
= HNAE3_NONE_RESET
;
1821 clear_bit(HCLGEVF_STATE_RST_HANDLING
, &hdev
->state
);
1822 up(&hdev
->reset_sem
);
1825 static void hclgevf_mailbox_service_task(struct hclgevf_dev
*hdev
)
1827 if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED
, &hdev
->state
))
1830 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING
, &hdev
->state
))
1833 hclgevf_mbx_async_handler(hdev
);
1835 clear_bit(HCLGEVF_STATE_MBX_HANDLING
, &hdev
->state
);
1838 static void hclgevf_keep_alive(struct hclgevf_dev
*hdev
)
1840 struct hclge_vf_to_pf_msg send_msg
;
1843 if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE
, &hdev
->hw
.hw
.comm_state
))
1846 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_KEEP_ALIVE
, 0);
1847 ret
= hclgevf_send_mbx_msg(hdev
, &send_msg
, false, NULL
, 0);
1849 dev_err(&hdev
->pdev
->dev
,
1850 "VF sends keep alive cmd failed(=%d)\n", ret
);
1853 static void hclgevf_periodic_service_task(struct hclgevf_dev
*hdev
)
1855 unsigned long delta
= round_jiffies_relative(HZ
);
1856 struct hnae3_handle
*handle
= &hdev
->nic
;
1858 if (test_bit(HCLGEVF_STATE_RST_FAIL
, &hdev
->state
) ||
1859 test_bit(HCLGE_COMM_STATE_CMD_DISABLE
, &hdev
->hw
.hw
.comm_state
))
1862 if (time_is_after_jiffies(hdev
->last_serv_processed
+ HZ
)) {
1863 delta
= jiffies
- hdev
->last_serv_processed
;
1865 if (delta
< round_jiffies_relative(HZ
)) {
1866 delta
= round_jiffies_relative(HZ
) - delta
;
1871 hdev
->serv_processed_cnt
++;
1872 if (!(hdev
->serv_processed_cnt
% HCLGEVF_KEEP_ALIVE_TASK_INTERVAL
))
1873 hclgevf_keep_alive(hdev
);
1875 if (test_bit(HCLGEVF_STATE_DOWN
, &hdev
->state
)) {
1876 hdev
->last_serv_processed
= jiffies
;
1880 if (!(hdev
->serv_processed_cnt
% HCLGEVF_STATS_TIMER_INTERVAL
))
1881 hclge_comm_tqps_update_stats(handle
, &hdev
->hw
.hw
);
1883 /* VF does not need to request link status when this bit is set, because
1884 * PF will push its link status to VFs when link status changed.
1886 if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS
, &hdev
->state
))
1887 hclgevf_request_link_info(hdev
);
1889 hclgevf_update_link_mode(hdev
);
1891 hclgevf_sync_vlan_filter(hdev
);
1893 hclgevf_sync_mac_table(hdev
);
1895 hclgevf_sync_promisc_mode(hdev
);
1897 hdev
->last_serv_processed
= jiffies
;
1900 hclgevf_task_schedule(hdev
, delta
);
1903 static void hclgevf_service_task(struct work_struct
*work
)
1905 struct hclgevf_dev
*hdev
= container_of(work
, struct hclgevf_dev
,
1908 hclgevf_reset_service_task(hdev
);
1909 hclgevf_mailbox_service_task(hdev
);
1910 hclgevf_periodic_service_task(hdev
);
1912 /* Handle reset and mbx again in case periodical task delays the
1913 * handling by calling hclgevf_task_schedule() in
1914 * hclgevf_periodic_service_task()
1916 hclgevf_reset_service_task(hdev
);
1917 hclgevf_mailbox_service_task(hdev
);
1920 static void hclgevf_clear_event_cause(struct hclgevf_dev
*hdev
, u32 regclr
)
1922 hclgevf_write_dev(&hdev
->hw
, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG
, regclr
);
1925 static enum hclgevf_evt_cause
hclgevf_check_evt_cause(struct hclgevf_dev
*hdev
,
1928 u32 val
, cmdq_stat_reg
, rst_ing_reg
;
1930 /* fetch the events from their corresponding regs */
1931 cmdq_stat_reg
= hclgevf_read_dev(&hdev
->hw
,
1932 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG
);
1933 if (BIT(HCLGEVF_VECTOR0_RST_INT_B
) & cmdq_stat_reg
) {
1934 rst_ing_reg
= hclgevf_read_dev(&hdev
->hw
, HCLGEVF_RST_ING
);
1935 dev_info(&hdev
->pdev
->dev
,
1936 "receive reset interrupt 0x%x!\n", rst_ing_reg
);
1937 set_bit(HNAE3_VF_RESET
, &hdev
->reset_pending
);
1938 set_bit(HCLGEVF_RESET_PENDING
, &hdev
->reset_state
);
1939 set_bit(HCLGE_COMM_STATE_CMD_DISABLE
, &hdev
->hw
.hw
.comm_state
);
1940 *clearval
= ~(1U << HCLGEVF_VECTOR0_RST_INT_B
);
1941 hdev
->rst_stats
.vf_rst_cnt
++;
1942 /* set up VF hardware reset status, its PF will clear
1943 * this status when PF has initialized done.
1945 val
= hclgevf_read_dev(&hdev
->hw
, HCLGEVF_VF_RST_ING
);
1946 hclgevf_write_dev(&hdev
->hw
, HCLGEVF_VF_RST_ING
,
1947 val
| HCLGEVF_VF_RST_ING_BIT
);
1948 return HCLGEVF_VECTOR0_EVENT_RST
;
1951 /* check for vector0 mailbox(=CMDQ RX) event source */
1952 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B
) & cmdq_stat_reg
) {
1953 /* for revision 0x21, clearing interrupt is writing bit 0
1954 * to the clear register, writing bit 1 means to keep the
1956 * for revision 0x20, the clear register is a read & write
1957 * register, so we should just write 0 to the bit we are
1958 * handling, and keep other bits as cmdq_stat_reg.
1960 if (hdev
->ae_dev
->dev_version
>= HNAE3_DEVICE_VERSION_V2
)
1961 *clearval
= ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B
);
1963 *clearval
= cmdq_stat_reg
&
1964 ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B
);
1966 return HCLGEVF_VECTOR0_EVENT_MBX
;
1969 /* print other vector0 event source */
1970 dev_info(&hdev
->pdev
->dev
,
1971 "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
1974 return HCLGEVF_VECTOR0_EVENT_OTHER
;
1977 static irqreturn_t
hclgevf_misc_irq_handle(int irq
, void *data
)
1979 enum hclgevf_evt_cause event_cause
;
1980 struct hclgevf_dev
*hdev
= data
;
1983 hclgevf_enable_vector(&hdev
->misc_vector
, false);
1984 event_cause
= hclgevf_check_evt_cause(hdev
, &clearval
);
1985 if (event_cause
!= HCLGEVF_VECTOR0_EVENT_OTHER
)
1986 hclgevf_clear_event_cause(hdev
, clearval
);
1988 switch (event_cause
) {
1989 case HCLGEVF_VECTOR0_EVENT_RST
:
1990 hclgevf_reset_task_schedule(hdev
);
1992 case HCLGEVF_VECTOR0_EVENT_MBX
:
1993 hclgevf_mbx_handler(hdev
);
1999 hclgevf_enable_vector(&hdev
->misc_vector
, true);
2004 static int hclgevf_configure(struct hclgevf_dev
*hdev
)
2008 hdev
->gro_en
= true;
2010 ret
= hclgevf_get_basic_info(hdev
);
2014 /* get current port based vlan state from PF */
2015 ret
= hclgevf_get_port_base_vlan_filter_state(hdev
);
2019 /* get queue configuration from PF */
2020 ret
= hclgevf_get_queue_info(hdev
);
2024 /* get queue depth info from PF */
2025 ret
= hclgevf_get_queue_depth(hdev
);
2029 return hclgevf_get_pf_media_type(hdev
);
2032 static int hclgevf_alloc_hdev(struct hnae3_ae_dev
*ae_dev
)
2034 struct pci_dev
*pdev
= ae_dev
->pdev
;
2035 struct hclgevf_dev
*hdev
;
2037 hdev
= devm_kzalloc(&pdev
->dev
, sizeof(*hdev
), GFP_KERNEL
);
2042 hdev
->ae_dev
= ae_dev
;
2043 ae_dev
->priv
= hdev
;
2048 static int hclgevf_init_roce_base_info(struct hclgevf_dev
*hdev
)
2050 struct hnae3_handle
*roce
= &hdev
->roce
;
2051 struct hnae3_handle
*nic
= &hdev
->nic
;
2053 roce
->rinfo
.num_vectors
= hdev
->num_roce_msix
;
2055 if (hdev
->num_msi_left
< roce
->rinfo
.num_vectors
||
2056 hdev
->num_msi_left
== 0)
2059 roce
->rinfo
.base_vector
= hdev
->roce_base_msix_offset
;
2061 roce
->rinfo
.netdev
= nic
->kinfo
.netdev
;
2062 roce
->rinfo
.roce_io_base
= hdev
->hw
.hw
.io_base
;
2063 roce
->rinfo
.roce_mem_base
= hdev
->hw
.hw
.mem_base
;
2065 roce
->pdev
= nic
->pdev
;
2066 roce
->ae_algo
= nic
->ae_algo
;
2067 roce
->numa_node_mask
= nic
->numa_node_mask
;
2072 static int hclgevf_config_gro(struct hclgevf_dev
*hdev
)
2074 struct hclgevf_cfg_gro_status_cmd
*req
;
2075 struct hclge_desc desc
;
2078 if (!hnae3_ae_dev_gro_supported(hdev
->ae_dev
))
2081 hclgevf_cmd_setup_basic_desc(&desc
, HCLGE_OPC_GRO_GENERIC_CONFIG
,
2083 req
= (struct hclgevf_cfg_gro_status_cmd
*)desc
.data
;
2085 req
->gro_en
= hdev
->gro_en
? 1 : 0;
2087 ret
= hclgevf_cmd_send(&hdev
->hw
, &desc
, 1);
2089 dev_err(&hdev
->pdev
->dev
,
2090 "VF GRO hardware config cmd failed, ret = %d.\n", ret
);
2095 static int hclgevf_rss_init_hw(struct hclgevf_dev
*hdev
)
2097 struct hclge_comm_rss_cfg
*rss_cfg
= &hdev
->rss_cfg
;
2098 u16 tc_offset
[HCLGE_COMM_MAX_TC_NUM
];
2099 u16 tc_valid
[HCLGE_COMM_MAX_TC_NUM
];
2100 u16 tc_size
[HCLGE_COMM_MAX_TC_NUM
];
2103 if (hdev
->ae_dev
->dev_version
>= HNAE3_DEVICE_VERSION_V2
) {
2104 ret
= hclge_comm_set_rss_algo_key(&hdev
->hw
.hw
,
2106 rss_cfg
->rss_hash_key
);
2110 ret
= hclge_comm_set_rss_input_tuple(&hdev
->hw
.hw
, rss_cfg
);
2115 ret
= hclge_comm_set_rss_indir_table(hdev
->ae_dev
, &hdev
->hw
.hw
,
2116 rss_cfg
->rss_indirection_tbl
);
2120 hclge_comm_get_rss_tc_info(rss_cfg
->rss_size
, hdev
->hw_tc_map
,
2121 tc_offset
, tc_valid
, tc_size
);
2123 return hclge_comm_set_rss_tc_mode(&hdev
->hw
.hw
, tc_offset
,
2127 static int hclgevf_init_vlan_config(struct hclgevf_dev
*hdev
)
2129 struct hnae3_handle
*nic
= &hdev
->nic
;
2132 ret
= hclgevf_en_hw_strip_rxvtag(nic
, true);
2134 dev_err(&hdev
->pdev
->dev
,
2135 "failed to enable rx vlan offload, ret = %d\n", ret
);
2139 return hclgevf_set_vlan_filter(&hdev
->nic
, htons(ETH_P_8021Q
), 0,
2143 static void hclgevf_flush_link_update(struct hclgevf_dev
*hdev
)
2145 #define HCLGEVF_FLUSH_LINK_TIMEOUT 100000
2147 unsigned long last
= hdev
->serv_processed_cnt
;
2150 while (test_bit(HCLGEVF_STATE_LINK_UPDATING
, &hdev
->state
) &&
2151 i
++ < HCLGEVF_FLUSH_LINK_TIMEOUT
&&
2152 last
== hdev
->serv_processed_cnt
)
2156 static void hclgevf_set_timer_task(struct hnae3_handle
*handle
, bool enable
)
2158 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
2161 hclgevf_task_schedule(hdev
, 0);
2163 set_bit(HCLGEVF_STATE_DOWN
, &hdev
->state
);
2165 /* flush memory to make sure DOWN is seen by service task */
2166 smp_mb__before_atomic();
2167 hclgevf_flush_link_update(hdev
);
2171 static int hclgevf_ae_start(struct hnae3_handle
*handle
)
2173 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
2175 clear_bit(HCLGEVF_STATE_DOWN
, &hdev
->state
);
2176 clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS
, &hdev
->state
);
2178 hclge_comm_reset_tqp_stats(handle
);
2180 hclgevf_request_link_info(hdev
);
2182 hclgevf_update_link_mode(hdev
);
2187 static void hclgevf_ae_stop(struct hnae3_handle
*handle
)
2189 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
2191 set_bit(HCLGEVF_STATE_DOWN
, &hdev
->state
);
2193 if (hdev
->reset_type
!= HNAE3_VF_RESET
)
2194 hclgevf_reset_tqp(handle
);
2196 hclge_comm_reset_tqp_stats(handle
);
2197 hclgevf_update_link_status(hdev
, 0);
2200 static int hclgevf_set_alive(struct hnae3_handle
*handle
, bool alive
)
2202 #define HCLGEVF_STATE_ALIVE 1
2203 #define HCLGEVF_STATE_NOT_ALIVE 0
2205 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
2206 struct hclge_vf_to_pf_msg send_msg
;
2208 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_SET_ALIVE
, 0);
2209 send_msg
.data
[0] = alive
? HCLGEVF_STATE_ALIVE
:
2210 HCLGEVF_STATE_NOT_ALIVE
;
2211 return hclgevf_send_mbx_msg(hdev
, &send_msg
, false, NULL
, 0);
2214 static int hclgevf_client_start(struct hnae3_handle
*handle
)
2216 return hclgevf_set_alive(handle
, true);
2219 static void hclgevf_client_stop(struct hnae3_handle
*handle
)
2221 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
2224 ret
= hclgevf_set_alive(handle
, false);
2226 dev_warn(&hdev
->pdev
->dev
,
2227 "%s failed %d\n", __func__
, ret
);
2230 static void hclgevf_state_init(struct hclgevf_dev
*hdev
)
2232 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED
, &hdev
->state
);
2233 clear_bit(HCLGEVF_STATE_MBX_HANDLING
, &hdev
->state
);
2234 clear_bit(HCLGEVF_STATE_RST_FAIL
, &hdev
->state
);
2236 INIT_DELAYED_WORK(&hdev
->service_task
, hclgevf_service_task
);
2238 mutex_init(&hdev
->mbx_resp
.mbx_mutex
);
2239 sema_init(&hdev
->reset_sem
, 1);
2241 spin_lock_init(&hdev
->mac_table
.mac_list_lock
);
2242 INIT_LIST_HEAD(&hdev
->mac_table
.uc_mac_list
);
2243 INIT_LIST_HEAD(&hdev
->mac_table
.mc_mac_list
);
2245 /* bring the device down */
2246 set_bit(HCLGEVF_STATE_DOWN
, &hdev
->state
);
2249 static void hclgevf_state_uninit(struct hclgevf_dev
*hdev
)
2251 set_bit(HCLGEVF_STATE_DOWN
, &hdev
->state
);
2252 set_bit(HCLGEVF_STATE_REMOVING
, &hdev
->state
);
2254 if (hdev
->service_task
.work
.func
)
2255 cancel_delayed_work_sync(&hdev
->service_task
);
2257 mutex_destroy(&hdev
->mbx_resp
.mbx_mutex
);
2260 static int hclgevf_init_msi(struct hclgevf_dev
*hdev
)
2262 struct pci_dev
*pdev
= hdev
->pdev
;
2266 if (hnae3_dev_roce_supported(hdev
))
2267 vectors
= pci_alloc_irq_vectors(pdev
,
2268 hdev
->roce_base_msix_offset
+ 1,
2272 vectors
= pci_alloc_irq_vectors(pdev
, HNAE3_MIN_VECTOR_NUM
,
2274 PCI_IRQ_MSI
| PCI_IRQ_MSIX
);
2278 "failed(%d) to allocate MSI/MSI-X vectors\n",
2282 if (vectors
< hdev
->num_msi
)
2283 dev_warn(&hdev
->pdev
->dev
,
2284 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2285 hdev
->num_msi
, vectors
);
2287 hdev
->num_msi
= vectors
;
2288 hdev
->num_msi_left
= vectors
;
2290 hdev
->vector_status
= devm_kcalloc(&pdev
->dev
, hdev
->num_msi
,
2291 sizeof(u16
), GFP_KERNEL
);
2292 if (!hdev
->vector_status
) {
2293 pci_free_irq_vectors(pdev
);
2297 for (i
= 0; i
< hdev
->num_msi
; i
++)
2298 hdev
->vector_status
[i
] = HCLGEVF_INVALID_VPORT
;
2300 hdev
->vector_irq
= devm_kcalloc(&pdev
->dev
, hdev
->num_msi
,
2301 sizeof(int), GFP_KERNEL
);
2302 if (!hdev
->vector_irq
) {
2303 devm_kfree(&pdev
->dev
, hdev
->vector_status
);
2304 pci_free_irq_vectors(pdev
);
2311 static void hclgevf_uninit_msi(struct hclgevf_dev
*hdev
)
2313 struct pci_dev
*pdev
= hdev
->pdev
;
2315 devm_kfree(&pdev
->dev
, hdev
->vector_status
);
2316 devm_kfree(&pdev
->dev
, hdev
->vector_irq
);
2317 pci_free_irq_vectors(pdev
);
2320 static int hclgevf_misc_irq_init(struct hclgevf_dev
*hdev
)
2324 hclgevf_get_misc_vector(hdev
);
2326 snprintf(hdev
->misc_vector
.name
, HNAE3_INT_NAME_LEN
, "%s-misc-%s",
2327 HCLGEVF_NAME
, pci_name(hdev
->pdev
));
2328 ret
= request_irq(hdev
->misc_vector
.vector_irq
, hclgevf_misc_irq_handle
,
2329 0, hdev
->misc_vector
.name
, hdev
);
2331 dev_err(&hdev
->pdev
->dev
, "VF failed to request misc irq(%d)\n",
2332 hdev
->misc_vector
.vector_irq
);
2336 hclgevf_clear_event_cause(hdev
, 0);
2338 /* enable misc. vector(vector 0) */
2339 hclgevf_enable_vector(&hdev
->misc_vector
, true);
2344 static void hclgevf_misc_irq_uninit(struct hclgevf_dev
*hdev
)
2346 /* disable misc vector(vector 0) */
2347 hclgevf_enable_vector(&hdev
->misc_vector
, false);
2348 synchronize_irq(hdev
->misc_vector
.vector_irq
);
2349 free_irq(hdev
->misc_vector
.vector_irq
, hdev
);
2350 hclgevf_free_vector(hdev
, 0);
/* Dump basic VF configuration to the kernel log; called from the NIC
 * client init path when netif_msg_drv() is set.
 */
static void hclgevf_info_show(struct hclgevf_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "VF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
	dev_info(dev, "PF media type of this VF: %u\n",
		 hdev->hw.mac.media_type);

	dev_info(dev, "VF info end.\n");
}
2370 static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev
*ae_dev
,
2371 struct hnae3_client
*client
)
2373 struct hclgevf_dev
*hdev
= ae_dev
->priv
;
2374 int rst_cnt
= hdev
->rst_stats
.rst_cnt
;
2377 ret
= client
->ops
->init_instance(&hdev
->nic
);
2381 set_bit(HCLGEVF_STATE_NIC_REGISTERED
, &hdev
->state
);
2382 if (test_bit(HCLGEVF_STATE_RST_HANDLING
, &hdev
->state
) ||
2383 rst_cnt
!= hdev
->rst_stats
.rst_cnt
) {
2384 clear_bit(HCLGEVF_STATE_NIC_REGISTERED
, &hdev
->state
);
2386 client
->ops
->uninit_instance(&hdev
->nic
, 0);
2390 hnae3_set_client_init_flag(client
, ae_dev
, 1);
2392 if (netif_msg_drv(&hdev
->nic
))
2393 hclgevf_info_show(hdev
);
2398 static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev
*ae_dev
,
2399 struct hnae3_client
*client
)
2401 struct hclgevf_dev
*hdev
= ae_dev
->priv
;
2404 if (!hnae3_dev_roce_supported(hdev
) || !hdev
->roce_client
||
2408 ret
= hclgevf_init_roce_base_info(hdev
);
2412 ret
= client
->ops
->init_instance(&hdev
->roce
);
2416 set_bit(HCLGEVF_STATE_ROCE_REGISTERED
, &hdev
->state
);
2417 hnae3_set_client_init_flag(client
, ae_dev
, 1);
2422 static int hclgevf_init_client_instance(struct hnae3_client
*client
,
2423 struct hnae3_ae_dev
*ae_dev
)
2425 struct hclgevf_dev
*hdev
= ae_dev
->priv
;
2428 switch (client
->type
) {
2429 case HNAE3_CLIENT_KNIC
:
2430 hdev
->nic_client
= client
;
2431 hdev
->nic
.client
= client
;
2433 ret
= hclgevf_init_nic_client_instance(ae_dev
, client
);
2437 ret
= hclgevf_init_roce_client_instance(ae_dev
,
2443 case HNAE3_CLIENT_ROCE
:
2444 if (hnae3_dev_roce_supported(hdev
)) {
2445 hdev
->roce_client
= client
;
2446 hdev
->roce
.client
= client
;
2449 ret
= hclgevf_init_roce_client_instance(ae_dev
, client
);
2461 hdev
->nic_client
= NULL
;
2462 hdev
->nic
.client
= NULL
;
2465 hdev
->roce_client
= NULL
;
2466 hdev
->roce
.client
= NULL
;
/* Unregister client instances from this ae_dev.
 *
 * RoCE (if present) is torn down first, then the NIC instance — but the
 * NIC is only torn down when the caller is not the RoCE client, since a
 * RoCE-initiated uninit must leave the NIC instance alive.  Both paths
 * busy-wait for any in-progress reset to finish before uninitializing.
 */
static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		/* wait for the reset task to release the device first */
		while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
			msleep(HCLGEVF_WAIT_RESET_DONE);
		clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);

		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
			msleep(HCLGEVF_WAIT_RESET_DONE);
		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);

		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}
2499 static int hclgevf_dev_mem_map(struct hclgevf_dev
*hdev
)
2501 struct pci_dev
*pdev
= hdev
->pdev
;
2502 struct hclgevf_hw
*hw
= &hdev
->hw
;
2504 /* for device does not have device memory, return directly */
2505 if (!(pci_select_bars(pdev
, IORESOURCE_MEM
) & BIT(HCLGEVF_MEM_BAR
)))
2509 devm_ioremap_wc(&pdev
->dev
,
2510 pci_resource_start(pdev
, HCLGEVF_MEM_BAR
),
2511 pci_resource_len(pdev
, HCLGEVF_MEM_BAR
));
2512 if (!hw
->hw
.mem_base
) {
2513 dev_err(&pdev
->dev
, "failed to map device memory\n");
2520 static int hclgevf_pci_init(struct hclgevf_dev
*hdev
)
2522 struct pci_dev
*pdev
= hdev
->pdev
;
2523 struct hclgevf_hw
*hw
;
2526 ret
= pci_enable_device(pdev
);
2528 dev_err(&pdev
->dev
, "failed to enable PCI device\n");
2532 ret
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(64));
2534 dev_err(&pdev
->dev
, "can't set consistent PCI DMA, exiting");
2535 goto err_disable_device
;
2538 ret
= pci_request_regions(pdev
, HCLGEVF_DRIVER_NAME
);
2540 dev_err(&pdev
->dev
, "PCI request regions failed %d\n", ret
);
2541 goto err_disable_device
;
2544 pci_set_master(pdev
);
2546 hw
->hw
.io_base
= pci_iomap(pdev
, 2, 0);
2547 if (!hw
->hw
.io_base
) {
2548 dev_err(&pdev
->dev
, "can't map configuration register space\n");
2550 goto err_release_regions
;
2553 ret
= hclgevf_dev_mem_map(hdev
);
2555 goto err_unmap_io_base
;
2560 pci_iounmap(pdev
, hdev
->hw
.hw
.io_base
);
2561 err_release_regions
:
2562 pci_release_regions(pdev
);
2564 pci_disable_device(pdev
);
/* Undo hclgevf_pci_init()/hclgevf_dev_mem_map(): unmap the optional
 * device-memory BAR, unmap the config register space, then release the
 * regions and disable the PCI device.
 */
static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	/* mem_base is only set when the device exposes the memory BAR */
	if (hdev->hw.hw.mem_base)
		devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);

	pci_iounmap(pdev, hdev->hw.hw.io_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
2581 static int hclgevf_query_vf_resource(struct hclgevf_dev
*hdev
)
2583 struct hclgevf_query_res_cmd
*req
;
2584 struct hclge_desc desc
;
2587 hclgevf_cmd_setup_basic_desc(&desc
, HCLGE_OPC_QUERY_VF_RSRC
, true);
2588 ret
= hclgevf_cmd_send(&hdev
->hw
, &desc
, 1);
2590 dev_err(&hdev
->pdev
->dev
,
2591 "query vf resource failed, ret = %d.\n", ret
);
2595 req
= (struct hclgevf_query_res_cmd
*)desc
.data
;
2597 if (hnae3_dev_roce_supported(hdev
)) {
2598 hdev
->roce_base_msix_offset
=
2599 hnae3_get_field(le16_to_cpu(req
->msixcap_localid_ba_rocee
),
2600 HCLGEVF_MSIX_OFT_ROCEE_M
,
2601 HCLGEVF_MSIX_OFT_ROCEE_S
);
2602 hdev
->num_roce_msix
=
2603 hnae3_get_field(le16_to_cpu(req
->vf_intr_vector_number
),
2604 HCLGEVF_VEC_NUM_M
, HCLGEVF_VEC_NUM_S
);
2606 /* nic's msix numbers is always equals to the roce's. */
2607 hdev
->num_nic_msix
= hdev
->num_roce_msix
;
2609 /* VF should have NIC vectors and Roce vectors, NIC vectors
2610 * are queued before Roce vectors. The offset is fixed to 64.
2612 hdev
->num_msi
= hdev
->num_roce_msix
+
2613 hdev
->roce_base_msix_offset
;
2616 hnae3_get_field(le16_to_cpu(req
->vf_intr_vector_number
),
2617 HCLGEVF_VEC_NUM_M
, HCLGEVF_VEC_NUM_S
);
2619 hdev
->num_nic_msix
= hdev
->num_msi
;
2622 if (hdev
->num_nic_msix
< HNAE3_MIN_VECTOR_NUM
) {
2623 dev_err(&hdev
->pdev
->dev
,
2624 "Just %u msi resources, not enough for vf(min:2).\n",
2625 hdev
->num_nic_msix
);
2632 static void hclgevf_set_default_dev_specs(struct hclgevf_dev
*hdev
)
2634 #define HCLGEVF_MAX_NON_TSO_BD_NUM 8U
2636 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(hdev
->pdev
);
2638 ae_dev
->dev_specs
.max_non_tso_bd_num
=
2639 HCLGEVF_MAX_NON_TSO_BD_NUM
;
2640 ae_dev
->dev_specs
.rss_ind_tbl_size
= HCLGEVF_RSS_IND_TBL_SIZE
;
2641 ae_dev
->dev_specs
.rss_key_size
= HCLGE_COMM_RSS_KEY_SIZE
;
2642 ae_dev
->dev_specs
.max_int_gl
= HCLGEVF_DEF_MAX_INT_GL
;
2643 ae_dev
->dev_specs
.max_frm_size
= HCLGEVF_MAC_MAX_FRAME
;
/* Copy the firmware-reported device specifications into
 * ae_dev->dev_specs.
 *
 * @desc: the two completed HCLGE_OPC_QUERY_DEV_SPECS descriptors;
 *        desc[0] carries spec set 0, desc[1] spec set 1.
 */
static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
				    struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclgevf_dev_specs_0_cmd *req0;
	struct hclgevf_dev_specs_1_cmd *req1;

	req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data;

	/* multi-byte fields arrive little-endian from firmware */
	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
					le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}
/* Backfill any specification the firmware reported as zero with the
 * corresponding driver default, so later code can rely on non-zero
 * values.
 */
static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME;
}
2681 static int hclgevf_query_dev_specs(struct hclgevf_dev
*hdev
)
2683 struct hclge_desc desc
[HCLGEVF_QUERY_DEV_SPECS_BD_NUM
];
2687 /* set default specifications as devices lower than version V3 do not
2688 * support querying specifications from firmware.
2690 if (hdev
->ae_dev
->dev_version
< HNAE3_DEVICE_VERSION_V3
) {
2691 hclgevf_set_default_dev_specs(hdev
);
2695 for (i
= 0; i
< HCLGEVF_QUERY_DEV_SPECS_BD_NUM
- 1; i
++) {
2696 hclgevf_cmd_setup_basic_desc(&desc
[i
],
2697 HCLGE_OPC_QUERY_DEV_SPECS
, true);
2698 desc
[i
].flag
|= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT
);
2700 hclgevf_cmd_setup_basic_desc(&desc
[i
], HCLGE_OPC_QUERY_DEV_SPECS
, true);
2702 ret
= hclgevf_cmd_send(&hdev
->hw
, desc
, HCLGEVF_QUERY_DEV_SPECS_BD_NUM
);
2706 hclgevf_parse_dev_specs(hdev
, desc
);
2707 hclgevf_check_dev_specs(hdev
);
2712 static int hclgevf_pci_reset(struct hclgevf_dev
*hdev
)
2714 struct pci_dev
*pdev
= hdev
->pdev
;
2717 if ((hdev
->reset_type
== HNAE3_VF_FULL_RESET
||
2718 hdev
->reset_type
== HNAE3_FLR_RESET
) &&
2719 test_bit(HCLGEVF_STATE_IRQ_INITED
, &hdev
->state
)) {
2720 hclgevf_misc_irq_uninit(hdev
);
2721 hclgevf_uninit_msi(hdev
);
2722 clear_bit(HCLGEVF_STATE_IRQ_INITED
, &hdev
->state
);
2725 if (!test_bit(HCLGEVF_STATE_IRQ_INITED
, &hdev
->state
)) {
2726 pci_set_master(pdev
);
2727 ret
= hclgevf_init_msi(hdev
);
2730 "failed(%d) to init MSI/MSI-X\n", ret
);
2734 ret
= hclgevf_misc_irq_init(hdev
);
2736 hclgevf_uninit_msi(hdev
);
2737 dev_err(&pdev
->dev
, "failed(%d) to init Misc IRQ(vector0)\n",
2742 set_bit(HCLGEVF_STATE_IRQ_INITED
, &hdev
->state
);
2748 static int hclgevf_clear_vport_list(struct hclgevf_dev
*hdev
)
2750 struct hclge_vf_to_pf_msg send_msg
;
2752 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_HANDLE_VF_TBL
,
2753 HCLGE_MBX_VPORT_LIST_CLEAR
);
2754 return hclgevf_send_mbx_msg(hdev
, &send_msg
, false, NULL
, 0);
2757 static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev
*hdev
)
2759 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev
->ae_dev
))
2760 hclgevf_write_dev(&hdev
->hw
, HCLGEVF_RXD_ADV_LAYOUT_EN_REG
, 1);
2763 static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev
*hdev
)
2765 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev
->ae_dev
))
2766 hclgevf_write_dev(&hdev
->hw
, HCLGEVF_RXD_ADV_LAYOUT_EN_REG
, 0);
2769 static int hclgevf_reset_hdev(struct hclgevf_dev
*hdev
)
2771 struct pci_dev
*pdev
= hdev
->pdev
;
2774 ret
= hclgevf_pci_reset(hdev
);
2776 dev_err(&pdev
->dev
, "pci reset failed %d\n", ret
);
2780 hclgevf_arq_init(hdev
);
2781 ret
= hclge_comm_cmd_init(hdev
->ae_dev
, &hdev
->hw
.hw
,
2782 &hdev
->fw_version
, false,
2783 hdev
->reset_pending
);
2785 dev_err(&pdev
->dev
, "cmd failed %d\n", ret
);
2789 ret
= hclgevf_rss_init_hw(hdev
);
2791 dev_err(&hdev
->pdev
->dev
,
2792 "failed(%d) to initialize RSS\n", ret
);
2796 ret
= hclgevf_config_gro(hdev
);
2800 ret
= hclgevf_init_vlan_config(hdev
);
2802 dev_err(&hdev
->pdev
->dev
,
2803 "failed(%d) to initialize VLAN config\n", ret
);
2807 /* get current port based vlan state from PF */
2808 ret
= hclgevf_get_port_base_vlan_filter_state(hdev
);
2812 set_bit(HCLGEVF_STATE_PROMISC_CHANGED
, &hdev
->state
);
2814 hclgevf_init_rxd_adv_layout(hdev
);
2816 dev_info(&hdev
->pdev
->dev
, "Reset done\n");
2821 static int hclgevf_init_hdev(struct hclgevf_dev
*hdev
)
2823 struct pci_dev
*pdev
= hdev
->pdev
;
2826 ret
= hclgevf_pci_init(hdev
);
2830 ret
= hclgevf_devlink_init(hdev
);
2832 goto err_devlink_init
;
2834 ret
= hclge_comm_cmd_queue_init(hdev
->pdev
, &hdev
->hw
.hw
);
2836 goto err_cmd_queue_init
;
2838 hclgevf_arq_init(hdev
);
2839 ret
= hclge_comm_cmd_init(hdev
->ae_dev
, &hdev
->hw
.hw
,
2840 &hdev
->fw_version
, false,
2841 hdev
->reset_pending
);
2845 /* Get vf resource */
2846 ret
= hclgevf_query_vf_resource(hdev
);
2850 ret
= hclgevf_query_dev_specs(hdev
);
2853 "failed to query dev specifications, ret = %d\n", ret
);
2857 ret
= hclgevf_init_msi(hdev
);
2859 dev_err(&pdev
->dev
, "failed(%d) to init MSI/MSI-X\n", ret
);
2863 hclgevf_state_init(hdev
);
2864 hdev
->reset_level
= HNAE3_VF_FUNC_RESET
;
2865 hdev
->reset_type
= HNAE3_NONE_RESET
;
2867 ret
= hclgevf_misc_irq_init(hdev
);
2869 goto err_misc_irq_init
;
2871 set_bit(HCLGEVF_STATE_IRQ_INITED
, &hdev
->state
);
2873 ret
= hclgevf_configure(hdev
);
2875 dev_err(&pdev
->dev
, "failed(%d) to fetch configuration\n", ret
);
2879 ret
= hclgevf_alloc_tqps(hdev
);
2881 dev_err(&pdev
->dev
, "failed(%d) to allocate TQPs\n", ret
);
2885 ret
= hclgevf_set_handle_info(hdev
);
2889 ret
= hclgevf_config_gro(hdev
);
2893 /* Initialize RSS for this VF */
2894 ret
= hclge_comm_rss_init_cfg(&hdev
->nic
, hdev
->ae_dev
,
2897 dev_err(&pdev
->dev
, "failed to init rss cfg, ret = %d\n", ret
);
2901 ret
= hclgevf_rss_init_hw(hdev
);
2903 dev_err(&hdev
->pdev
->dev
,
2904 "failed(%d) to initialize RSS\n", ret
);
2908 /* ensure vf tbl list as empty before init */
2909 ret
= hclgevf_clear_vport_list(hdev
);
2912 "failed to clear tbl list configuration, ret = %d.\n",
2917 ret
= hclgevf_init_vlan_config(hdev
);
2919 dev_err(&hdev
->pdev
->dev
,
2920 "failed(%d) to initialize VLAN config\n", ret
);
2924 hclgevf_init_rxd_adv_layout(hdev
);
2926 set_bit(HCLGEVF_STATE_SERVICE_INITED
, &hdev
->state
);
2928 hdev
->last_reset_time
= jiffies
;
2929 dev_info(&hdev
->pdev
->dev
, "finished initializing %s driver\n",
2930 HCLGEVF_DRIVER_NAME
);
2932 hclgevf_task_schedule(hdev
, round_jiffies_relative(HZ
));
2937 hclgevf_misc_irq_uninit(hdev
);
2939 hclgevf_state_uninit(hdev
);
2940 hclgevf_uninit_msi(hdev
);
2942 hclge_comm_cmd_uninit(hdev
->ae_dev
, &hdev
->hw
.hw
);
2944 hclgevf_devlink_uninit(hdev
);
2946 hclgevf_pci_uninit(hdev
);
2947 clear_bit(HCLGEVF_STATE_IRQ_INITED
, &hdev
->state
);
/* Tear down the VF device in reverse order of initialization: stop the
 * driver state machinery, notify the PF, then release IRQ, command
 * queue, devlink and PCI resources.
 */
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_state_uninit(hdev);
	hclgevf_uninit_rxd_adv_layout(hdev);

	/* tell the PF this VF is going away; return value is ignored
	 * since teardown proceeds regardless
	 */
	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);

	/* IRQs may already be gone if a full/FLR reset uninitialized them */
	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
	}

	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
	hclgevf_devlink_uninit(hdev);
	hclgevf_pci_uninit(hdev);
	hclgevf_uninit_mac_list(hdev);
}
2972 static int hclgevf_init_ae_dev(struct hnae3_ae_dev
*ae_dev
)
2974 struct pci_dev
*pdev
= ae_dev
->pdev
;
2977 ret
= hclgevf_alloc_hdev(ae_dev
);
2979 dev_err(&pdev
->dev
, "hclge device allocation failed\n");
2983 ret
= hclgevf_init_hdev(ae_dev
->priv
);
2985 dev_err(&pdev
->dev
, "hclge device initialization failed\n");
2992 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev
*ae_dev
)
2994 struct hclgevf_dev
*hdev
= ae_dev
->priv
;
2996 hclgevf_uninit_hdev(hdev
);
2997 ae_dev
->priv
= NULL
;
3000 static u32
hclgevf_get_max_channels(struct hclgevf_dev
*hdev
)
3002 struct hnae3_handle
*nic
= &hdev
->nic
;
3003 struct hnae3_knic_private_info
*kinfo
= &nic
->kinfo
;
3005 return min_t(u32
, hdev
->rss_size_max
,
3006 hdev
->num_tqps
/ kinfo
->tc_info
.num_tc
);
3010 * hclgevf_get_channels - Get the current channels enabled and max supported.
3011 * @handle: hardware information for network interface
3012 * @ch: ethtool channels structure
3014 * We don't support separate tx and rx queues as channels. The other count
3015 * represents how many queues are being used for control. max_combined counts
3016 * how many queue pairs we can support. They may not be mapped 1 to 1 with
3017 * q_vectors since we support a lot more queue pairs than q_vectors.
3019 static void hclgevf_get_channels(struct hnae3_handle
*handle
,
3020 struct ethtool_channels
*ch
)
3022 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
3024 ch
->max_combined
= hclgevf_get_max_channels(hdev
);
3025 ch
->other_count
= 0;
3027 ch
->combined_count
= handle
->kinfo
.rss_size
;
3030 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle
*handle
,
3031 u16
*alloc_tqps
, u16
*max_rss_size
)
3033 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
3035 *alloc_tqps
= hdev
->num_tqps
;
3036 *max_rss_size
= hdev
->rss_size_max
;
3039 static void hclgevf_update_rss_size(struct hnae3_handle
*handle
,
3042 struct hnae3_knic_private_info
*kinfo
= &handle
->kinfo
;
3043 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
3046 kinfo
->req_rss_size
= new_tqps_num
;
3048 max_rss_size
= min_t(u16
, hdev
->rss_size_max
,
3049 hdev
->num_tqps
/ kinfo
->tc_info
.num_tc
);
3051 /* Use the user's configuration when it is not larger than
3052 * max_rss_size, otherwise, use the maximum specification value.
3054 if (kinfo
->req_rss_size
!= kinfo
->rss_size
&& kinfo
->req_rss_size
&&
3055 kinfo
->req_rss_size
<= max_rss_size
)
3056 kinfo
->rss_size
= kinfo
->req_rss_size
;
3057 else if (kinfo
->rss_size
> max_rss_size
||
3058 (!kinfo
->req_rss_size
&& kinfo
->rss_size
< max_rss_size
))
3059 kinfo
->rss_size
= max_rss_size
;
3061 kinfo
->num_tqps
= kinfo
->tc_info
.num_tc
* kinfo
->rss_size
;
3064 static int hclgevf_set_channels(struct hnae3_handle
*handle
, u32 new_tqps_num
,
3065 bool rxfh_configured
)
3067 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
3068 struct hnae3_knic_private_info
*kinfo
= &handle
->kinfo
;
3069 u16 tc_offset
[HCLGE_COMM_MAX_TC_NUM
];
3070 u16 tc_valid
[HCLGE_COMM_MAX_TC_NUM
];
3071 u16 tc_size
[HCLGE_COMM_MAX_TC_NUM
];
3072 u16 cur_rss_size
= kinfo
->rss_size
;
3073 u16 cur_tqps
= kinfo
->num_tqps
;
3078 hclgevf_update_rss_size(handle
, new_tqps_num
);
3080 hclge_comm_get_rss_tc_info(kinfo
->rss_size
, hdev
->hw_tc_map
,
3081 tc_offset
, tc_valid
, tc_size
);
3082 ret
= hclge_comm_set_rss_tc_mode(&hdev
->hw
.hw
, tc_offset
,
3087 /* RSS indirection table has been configured by user */
3088 if (rxfh_configured
)
3091 /* Reinitializes the rss indirect table according to the new RSS size */
3092 rss_indir
= kcalloc(hdev
->ae_dev
->dev_specs
.rss_ind_tbl_size
,
3093 sizeof(u32
), GFP_KERNEL
);
3097 for (i
= 0; i
< hdev
->ae_dev
->dev_specs
.rss_ind_tbl_size
; i
++)
3098 rss_indir
[i
] = i
% kinfo
->rss_size
;
3100 hdev
->rss_cfg
.rss_size
= kinfo
->rss_size
;
3102 ret
= hclgevf_set_rss(handle
, rss_indir
, NULL
, 0);
3104 dev_err(&hdev
->pdev
->dev
, "set rss indir table fail, ret=%d\n",
3111 dev_info(&hdev
->pdev
->dev
,
3112 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
3113 cur_rss_size
, kinfo
->rss_size
,
3114 cur_tqps
, kinfo
->rss_size
* kinfo
->tc_info
.num_tc
);
3119 static int hclgevf_get_status(struct hnae3_handle
*handle
)
3121 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
3123 return hdev
->hw
.mac
.link
;
3126 static void hclgevf_get_ksettings_an_result(struct hnae3_handle
*handle
,
3127 u8
*auto_neg
, u32
*speed
,
3128 u8
*duplex
, u32
*lane_num
)
3130 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
3133 *speed
= hdev
->hw
.mac
.speed
;
3135 *duplex
= hdev
->hw
.mac
.duplex
;
3137 *auto_neg
= AUTONEG_DISABLE
;
3140 void hclgevf_update_speed_duplex(struct hclgevf_dev
*hdev
, u32 speed
,
3143 hdev
->hw
.mac
.speed
= speed
;
3144 hdev
->hw
.mac
.duplex
= duplex
;
3147 static int hclgevf_gro_en(struct hnae3_handle
*handle
, bool enable
)
3149 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
3150 bool gro_en_old
= hdev
->gro_en
;
3153 hdev
->gro_en
= enable
;
3154 ret
= hclgevf_config_gro(hdev
);
3156 hdev
->gro_en
= gro_en_old
;
3161 static void hclgevf_get_media_type(struct hnae3_handle
*handle
, u8
*media_type
,
3164 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
3167 *media_type
= hdev
->hw
.mac
.media_type
;
3170 *module_type
= hdev
->hw
.mac
.module_type
;
3173 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle
*handle
)
3175 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
3177 return !!hclgevf_read_dev(&hdev
->hw
, HCLGEVF_RST_ING
);
3180 static bool hclgevf_get_cmdq_stat(struct hnae3_handle
*handle
)
3182 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
3184 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE
, &hdev
->hw
.hw
.comm_state
);
3187 static bool hclgevf_ae_dev_resetting(struct hnae3_handle
*handle
)
3189 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
3191 return test_bit(HCLGEVF_STATE_RST_HANDLING
, &hdev
->state
);
3194 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle
*handle
)
3196 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
3198 return hdev
->rst_stats
.hw_rst_done_cnt
;
3201 static void hclgevf_get_link_mode(struct hnae3_handle
*handle
,
3202 unsigned long *supported
,
3203 unsigned long *advertising
)
3205 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
3207 *supported
= hdev
->hw
.mac
.supported
;
3208 *advertising
= hdev
->hw
.mac
.advertising
;
3211 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev
*hdev
, u16 state
,
3212 struct hclge_mbx_port_base_vlan
*port_base_vlan
)
3214 struct hnae3_handle
*nic
= &hdev
->nic
;
3215 struct hclge_vf_to_pf_msg send_msg
;
3220 if (test_bit(HCLGEVF_STATE_RST_HANDLING
, &hdev
->state
) ||
3221 test_bit(HCLGEVF_STATE_RST_FAIL
, &hdev
->state
)) {
3222 dev_warn(&hdev
->pdev
->dev
,
3223 "is resetting when updating port based vlan info\n");
3228 ret
= hclgevf_notify_client(hdev
, HNAE3_DOWN_CLIENT
);
3234 /* send msg to PF and wait update port based vlan info */
3235 hclgevf_build_send_msg(&send_msg
, HCLGE_MBX_SET_VLAN
,
3236 HCLGE_MBX_PORT_BASE_VLAN_CFG
);
3237 memcpy(send_msg
.data
, port_base_vlan
, sizeof(*port_base_vlan
));
3238 ret
= hclgevf_send_mbx_msg(hdev
, &send_msg
, false, NULL
, 0);
3240 if (state
== HNAE3_PORT_BASE_VLAN_DISABLE
)
3241 nic
->port_base_vlan_state
= state
;
3243 nic
->port_base_vlan_state
= HNAE3_PORT_BASE_VLAN_ENABLE
;
3246 hclgevf_notify_client(hdev
, HNAE3_UP_CLIENT
);
3250 static const struct hnae3_ae_ops hclgevf_ops
= {
3251 .init_ae_dev
= hclgevf_init_ae_dev
,
3252 .uninit_ae_dev
= hclgevf_uninit_ae_dev
,
3253 .reset_prepare
= hclgevf_reset_prepare_general
,
3254 .reset_done
= hclgevf_reset_done
,
3255 .init_client_instance
= hclgevf_init_client_instance
,
3256 .uninit_client_instance
= hclgevf_uninit_client_instance
,
3257 .start
= hclgevf_ae_start
,
3258 .stop
= hclgevf_ae_stop
,
3259 .client_start
= hclgevf_client_start
,
3260 .client_stop
= hclgevf_client_stop
,
3261 .map_ring_to_vector
= hclgevf_map_ring_to_vector
,
3262 .unmap_ring_from_vector
= hclgevf_unmap_ring_from_vector
,
3263 .get_vector
= hclgevf_get_vector
,
3264 .put_vector
= hclgevf_put_vector
,
3265 .reset_queue
= hclgevf_reset_tqp
,
3266 .get_mac_addr
= hclgevf_get_mac_addr
,
3267 .set_mac_addr
= hclgevf_set_mac_addr
,
3268 .add_uc_addr
= hclgevf_add_uc_addr
,
3269 .rm_uc_addr
= hclgevf_rm_uc_addr
,
3270 .add_mc_addr
= hclgevf_add_mc_addr
,
3271 .rm_mc_addr
= hclgevf_rm_mc_addr
,
3272 .get_stats
= hclgevf_get_stats
,
3273 .update_stats
= hclgevf_update_stats
,
3274 .get_strings
= hclgevf_get_strings
,
3275 .get_sset_count
= hclgevf_get_sset_count
,
3276 .get_rss_key_size
= hclge_comm_get_rss_key_size
,
3277 .get_rss
= hclgevf_get_rss
,
3278 .set_rss
= hclgevf_set_rss
,
3279 .get_rss_tuple
= hclgevf_get_rss_tuple
,
3280 .set_rss_tuple
= hclgevf_set_rss_tuple
,
3281 .get_tc_size
= hclgevf_get_tc_size
,
3282 .get_fw_version
= hclgevf_get_fw_version
,
3283 .set_vlan_filter
= hclgevf_set_vlan_filter
,
3284 .enable_vlan_filter
= hclgevf_enable_vlan_filter
,
3285 .enable_hw_strip_rxvtag
= hclgevf_en_hw_strip_rxvtag
,
3286 .reset_event
= hclgevf_reset_event
,
3287 .set_default_reset_request
= hclgevf_set_def_reset_request
,
3288 .set_channels
= hclgevf_set_channels
,
3289 .get_channels
= hclgevf_get_channels
,
3290 .get_tqps_and_rss_info
= hclgevf_get_tqps_and_rss_info
,
3291 .get_regs_len
= hclgevf_get_regs_len
,
3292 .get_regs
= hclgevf_get_regs
,
3293 .get_status
= hclgevf_get_status
,
3294 .get_ksettings_an_result
= hclgevf_get_ksettings_an_result
,
3295 .get_media_type
= hclgevf_get_media_type
,
3296 .get_hw_reset_stat
= hclgevf_get_hw_reset_stat
,
3297 .ae_dev_resetting
= hclgevf_ae_dev_resetting
,
3298 .ae_dev_reset_cnt
= hclgevf_ae_dev_reset_cnt
,
3299 .set_gro_en
= hclgevf_gro_en
,
3300 .set_mtu
= hclgevf_set_mtu
,
3301 .get_global_queue_id
= hclgevf_get_qid_global
,
3302 .set_timer_task
= hclgevf_set_timer_task
,
3303 .get_link_mode
= hclgevf_get_link_mode
,
3304 .set_promisc_mode
= hclgevf_set_promisc_mode
,
3305 .request_update_promisc_mode
= hclgevf_request_update_promisc_mode
,
3306 .get_cmdq_stat
= hclgevf_get_cmdq_stat
,
3309 static struct hnae3_ae_algo ae_algovf
= {
3310 .ops
= &hclgevf_ops
,
3311 .pdev_id_table
= ae_algovf_pci_tbl
,
3314 static int __init
hclgevf_init(void)
3316 pr_info("%s is initializing\n", HCLGEVF_NAME
);
3318 hclgevf_wq
= alloc_workqueue("%s", WQ_UNBOUND
, 0, HCLGEVF_NAME
);
3320 pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME
);
3324 hnae3_register_ae_algo(&ae_algovf
);
/* Module exit: unregister the algo before destroying the workqueue the
 * driver's tasks run on.
 */
static void __exit hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
	destroy_workqueue(hclgevf_wq);
}
3334 module_init(hclgevf_init
);
3335 module_exit(hclgevf_exit
);
3337 MODULE_LICENSE("GPL");
3338 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
3339 MODULE_DESCRIPTION("HCLGEVF Driver");
3340 MODULE_VERSION(HCLGEVF_MOD_VERSION
);