// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>

#include <net/rtnetlink.h>

#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_regs.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#include "hclge_devlink.h"
#include "hclge_comm_cmd.h"

#define HCLGE_NAME			"hclge"

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

#define HCLGE_LINK_STATUS_MS	10

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
static void hclge_update_fec_stats(struct hclge_dev *hdev);
static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
				      int wait_cnt);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"External Loopback test",
	"App      Loopback test",
	"Serdes   serial Loopback test",
	"Serdes   parallel Loopback test",
	"Phy      Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
	{"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
	{"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
	{"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
	{"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
	{"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
	{"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
	{"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
	{"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
	{"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
	{"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
	{"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
	{"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
	{"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
	{"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
	{"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
	{"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
	{"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
	{"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};

static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6 },
	{ IP_FRAGEMENT, 1 },
	{ ROCE_TYPE, 1 },
	{ NEXT_KEY, 5 },
	{ VLAN_TAG_NUM, 2 },
	{ SRC_VPORT, 12 },
	{ DST_VPORT, 12 },
	{ TUNNEL_PACKET, 1 },
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.src_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
	{ INNER_IP_TOS, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
	{ INNER_SRC_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.src_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
	{ INNER_DST_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.src_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.dst_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};

/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue; it sends the
 * descriptors to the queue and cleans the queue afterwards.
 **/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(&hw->hw, desc, num);
}

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	u32 data_size;
	int ret;
	u32 i;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	/* The first desc has a 64-bit header, so the data size needs to be
	 * reduced by 1.
	 */
	data_size = sizeof(desc) / (sizeof(u64)) - 1;

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* data memory is continuous because only the first desc has a
		 * header in this command
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	return 0;
}

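/* Read the full MAC statistics block on firmware that reports its register
 * count (dev_specs.mac_stats_num); the descriptor array is sized from that
 * count and allocated with GFP_ATOMIC since callers may be in atomic context.
 */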
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
{
#define HCLGE_REG_NUM_PER_DESC		4

	u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 data_size;
	u32 desc_num;
	int ret;
	u32 i;

	/* The first desc has a 64-bit header, so need to consider it */
	desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* data memory is continuous because only the first desc has a
		 * header in this command
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
{
	struct hclge_desc desc;
	int ret;

	/* Driver needs total register number of both valid registers and
	 * reserved registers, but the old firmware only returns number
	 * of valid registers in device V2. To be compatible with these
	 * devices, driver uses a fixed value.
	 */
	if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
		*reg_num = HCLGE_MAC_STATS_MAX_NUM_V1;
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query mac statistic reg number, ret = %d\n",
			ret);
		return ret;
	}

	*reg_num = le32_to_cpu(desc.data[0]);
	if (*reg_num == 0) {
		dev_err(&hdev->pdev->dev,
			"mac statistic reg number is invalid!\n");
		return -ENODATA;
	}

	return 0;
}

int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	/* The firmware supports the new statistics acquisition method */
	if (hdev->ae_dev->dev_specs.mac_stats_num)
		return hclge_mac_update_stats_complete(hdev);

	return hclge_mac_update_stats_defective(hdev);
}

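/* The helpers below walk a hclge_comm_stats_str table and skip any entry
 * whose stats_num exceeds the register count this firmware exposes, so
 * V2-only counters are hidden on V1 devices.
 */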
static int hclge_comm_get_count(struct hclge_dev *hdev,
				const struct hclge_comm_stats_str strs[],
				u32 size)
{
	int count = 0;
	u32 i;

	for (i = 0; i < size; i++)
		if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
			count++;

	return count;
}

static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++) {
		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
			continue;

		*buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
		buf++;
	}

	return buf;
}

static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
			continue;

		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	hclge_update_fec_stats(hdev);

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

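/* Report how many ethtool self-test items or statistics strings this device
 * exposes; for ETH_SS_TEST the supported loopback modes are also latched
 * into handle->flags as a side effect.
 */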
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
		HNAE3_SUPPORT_PHY_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \
		HNAE3_SUPPORT_EXTERNAL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: supported only in GE mode
	 * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
	 * phy: supported only when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 1;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		count += 1;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
		count += 1;
		handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK;

		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
		     hdev->hw.mac.phydev->drv->set_loopback) ||
		    hnae3_dev_phy_imp_supported(hdev)) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = hclge_comm_get_count(hdev, g_mac_stats_string,
					     ARRAY_SIZE(g_mac_stats_string)) +
			hclge_comm_tqps_get_sset_count(handle);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
					   size, p);
		p = hclge_comm_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(hdev, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_comm_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK	0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	do {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS,
					   true);
		req = (struct hclge_func_status_cmd *)desc.data;

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}

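/* Read the PF's TQP, buffer and MSI-X budget from firmware and apply the
 * driver defaults for any buffer size the firmware leaves at zero.
 */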
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
			 le16_to_cpu(req->ext_tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			le16_to_cpu(req->pf_intr_vector_number_roce);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
	} else {
		hdev->num_msi = hdev->num_nic_msi;
	}

	return 0;
}

static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
	switch (speed_cmd) {
	case HCLGE_FW_MAC_SPEED_10M:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case HCLGE_FW_MAC_SPEED_100M:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case HCLGE_FW_MAC_SPEED_1G:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case HCLGE_FW_MAC_SPEED_10G:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case HCLGE_FW_MAC_SPEED_25G:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case HCLGE_FW_MAC_SPEED_40G:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case HCLGE_FW_MAC_SPEED_50G:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case HCLGE_FW_MAC_SPEED_100G:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	case HCLGE_FW_MAC_SPEED_200G:
		*speed = HCLGE_MAC_SPEED_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

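/* Map each MAC speed to its capability bit in hdev->hw.mac.speed_ability */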
static const struct hclge_speed_bit_map speed_bit_map[] = {
	{HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
	{HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
	{HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
	{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
	{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
	{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
	{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BITS},
	{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BITS},
	{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
};

static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
{
	u16 i;

	for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
		if (speed == speed_bit_map[i].speed) {
			*speed_bit = speed_bit_map[i].speed_bit;
			return 0;
		}
	}

	return -EINVAL;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;
	int ret;

	ret = hclge_get_speed_bit(speed, &speed_bit);
	if (ret)
		return ret;

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_update_fec_support(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);

	if (mac->fec_ability & BIT(HNAE3_FEC_BASER))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
	if (mac->fec_ability & BIT(HNAE3_FEC_RS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
	if (mac->fec_ability & BIT(HNAE3_FEC_LLRS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 mac->supported);
	if (mac->fec_ability & BIT(HNAE3_FEC_NONE))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 mac->supported);
}

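/* Per-medium tables translating firmware speed-ability bits into ethtool
 * link modes: SR/LR for optical, CR for copper DAC, KR for backplane.
 */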
static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[8] = {
	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT},
	{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT},
	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT},
	{HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT},
	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT},
	{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT},
	{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT},
	{HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
};

static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[6] = {
	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT},
	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT},
	{HCLGE_SUPPORT_100G_R4_BIT,
	 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT},
	{HCLGE_SUPPORT_100G_R2_BIT,
	 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT},
	{HCLGE_SUPPORT_200G_BIT,
	 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
};

static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[8] = {
	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT},
	{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT},
	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT},
	{HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT},
	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT},
	{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT},
	{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT},
	{HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
};

static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[9] = {
	{HCLGE_SUPPORT_1G_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT},
	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
	{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT},
	{HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT},
	{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
	{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT},
	{HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
};

static void hclge_convert_setting_sr(u16 speed_ability,
				     unsigned long *link_mode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_sr_link_mode_bmap); i++) {
		if (speed_ability & hclge_sr_link_mode_bmap[i].support_bit)
			linkmode_set_bit(hclge_sr_link_mode_bmap[i].link_mode,
					 link_mode);
	}
}

static void hclge_convert_setting_lr(u16 speed_ability,
				     unsigned long *link_mode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_lr_link_mode_bmap); i++) {
		if (speed_ability & hclge_lr_link_mode_bmap[i].support_bit)
			linkmode_set_bit(hclge_lr_link_mode_bmap[i].link_mode,
					 link_mode);
	}
}

static void hclge_convert_setting_cr(u16 speed_ability,
				     unsigned long *link_mode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_cr_link_mode_bmap); i++) {
		if (speed_ability & hclge_cr_link_mode_bmap[i].support_bit)
			linkmode_set_bit(hclge_cr_link_mode_bmap[i].link_mode,
					 link_mode);
	}
}

static void hclge_convert_setting_kr(u16 speed_ability,
				     unsigned long *link_mode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_kr_link_mode_bmap); i++) {
		if (speed_ability & hclge_kr_link_mode_bmap[i].support_bit)
			linkmode_set_bit(hclge_kr_link_mode_bmap[i].link_mode,
					 link_mode);
	}
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	/* If firmware has reported fec_ability, don't need to convert by speed */
	if (mac->fec_ability)
		goto out;

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) |
				   BIT(HNAE3_FEC_NONE);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
				   BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE);
		break;
	case HCLGE_MAC_SPEED_100G:
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
				   BIT(HNAE3_FEC_NONE);
		break;
	case HCLGE_MAC_SPEED_200G:
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
				   BIT(HNAE3_FEC_LLRS);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}

out:
	hclge_update_fec_support(mac);
}

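/* Build mac->supported for fiber ports from the speed-ability bits,
 * including FEC and pause modes when the device supports them.
 */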
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(speed_ability, mac->supported);
	hclge_convert_setting_lr(speed_ability, mac->supported);
	hclge_convert_setting_cr(speed_ability, mac->supported);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(speed_ability, mac->supported);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u16 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	if (hnae3_dev_pause_supported(hdev)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static u32 hclge_get_max_speed(u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		return HCLGE_MAC_SPEED_200G;

	if (speed_ability & HCLGE_SUPPORT_100G_BITS)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BITS)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}

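/* Decode the raw configuration words returned by HCLGE_OPC_GET_CFG_PARAM
 * into struct hclge_cfg; the param[] words are little-endian bit fields.
 */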
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define HCLGE_TX_SPARE_SIZE_UNIT		4096
#define SPEED_ABILITY_EXT_SHIFT			8

	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u16 speed_ability_ext;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
					       HCLGE_CFG_VLAN_FLTR_CAP_M,
					       HCLGE_CFG_VLAN_FLTR_CAP_S);

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* HCLGE_CFG_PF_RSS_SIZE_M is the PF max rss size, which is a
	 * power of 2, instead of reading out directly. This would
	 * be more flexible for future changes and expansions.
	 * When VF max rss size field is HCLGE_CFG_RSS_SIZE_S,
	 * it does not make sense if PF's field is 0. In this case, PF and VF
	 * have the same max rss size field: HCLGE_CFG_RSS_SIZE_S.
	 */
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;

	/* The unit of the tx spare buffer size queried from configuration
	 * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
	 * needed here.
	 */
	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len is in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

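/* Fallback device specifications for firmware older than V3, which cannot
 * report them via HCLGE_OPC_QUERY_DEV_SPECS.
 */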
static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM	8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
	ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
	ae_dev->dev_specs.tnl_num = 0;
}

static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
	ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
	ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
	ae_dev->dev_specs.tnl_num = req1->tnl_num;
}

static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	if (!dev_specs->max_tm_rate)
		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
	if (!dev_specs->max_qset_num)
		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
	if (!dev_specs->umv_size)
		dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
{
	u32 reg_num = 0;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &reg_num);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
	return 0;
}

static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	ret = hclge_query_mac_stats_num(hdev);
	if (ret)
		return ret;

	/* set default specifications as devices lower than version V3 do not
	 * support querying specifications from firmware.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclge_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	}
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclge_parse_dev_specs(hdev, desc);
	hclge_check_dev_specs(hdev);

	return 0;
}

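/* Query function status and PF resources as the first step of capability
 * discovery during initialization.
 */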
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals to the number of vports */
	hdev->num_tqps = hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static void hclge_init_tc_config(struct hclge_dev *hdev)
{
	unsigned int i;

	if (hdev->tc_max > HNAE3_MAX_TC ||
	    hdev->tc_max < 1) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Currently not support non-contiguous tc */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_cfg cfg;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	if (cfg.umv_space)
		hdev->wanted_umv_size = cfg.umv_space;
	else
		hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
	hdev->gro_en = true;
	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	hclge_init_tc_config(hdev);
	hclge_init_kdump_kernel_config(hdev);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = hdev->gro_en ? 1 : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

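/* Allocate the per-device TQP array and point each queue at its register
 * window; queues beyond HCLGE_TQP_MAX_SIZE_DEV_V2 live in the extended
 * register region.
 */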
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_comm_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_comm_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGE_TQP_MAX_SIZE_DEV_V2
		 */
		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 i * HCLGE_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 HCLGE_TQP_EXT_REG_OFFSET +
					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
					 HCLGE_TQP_REG_SIZE;

		/* when device supports tx push and has device memory,
		 * the queue can execute push mode or doorbell mode on
		 * device memory.
		 */
		if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
			tqp->q.mem_base = hdev->hw.hw.mem_base +
					  HCLGE_TQP_MEM_OFFSET(hdev, i);

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

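/* Hand out unassigned TQPs to a vport and derive its RSS size from the
 * queues per TC, capped so each queue can own a dedicated NIC interrupt.
 */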
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure one to one mapping between irq and queue at default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_comm_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->kinfo.io_base = hdev->hw.hw.io_base;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->port_base_vlan_cfg.tbl_sta = true;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		vport->req_vlan_fltr_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);
		spin_lock_init(&vport->mac_list_lock);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
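
/* The helpers below carve the MAC packet buffer (hdev->pkt_buf_size)
 * into per-TC TX buffers, per-TC RX private buffers and one shared RX
 * buffer, then program the result into firmware.
 */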
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is unit by 128 byte */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
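
/* Check that the RX space left after private allocation is enough for
 * the shared buffer, then fill in the shared buffer size, its self
 * waterlines and the per-TC thresholds.
 */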
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
				HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
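
/* When the total RX space is large enough, split it evenly into private
 * buffers only (no shared buffer), reserving dv_buf_size plus a
 * compensation margin as headroom inside each TC's buffer.
 */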
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP		0x1800

	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 half_mps = hdev->mps >> 1;
	u32 min_rx_priv;
	unsigned int i;

	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
		      COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - hdev->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculate successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
		return 0;

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptor at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}
static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}
static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}
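
/* RoCE vectors are laid out after the NIC vectors in the MSI-X table,
 * so the RoCE base vector equals num_nic_msi.
 */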
static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->num_nic_msi;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
	roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
					hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
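
/* Half duplex only exists for 10M/100M links; hclge_check_speed_dup()
 * below forces full duplex for every other speed.
 */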
static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = {
	{HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M},
	{HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M},
	{HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G},
	{HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G},
	{HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G},
	{HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G},
	{HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G},
	{HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G},
	{HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G},
};

static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
{
	u16 i;

	for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) {
		if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) {
			*speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw;
			return 0;
		}
	}

	return -EINVAL;
}
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex, u8 lane_num)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	u32 speed_fw;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	if (duplex)
		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);

	ret = hclge_convert_to_fw_speed(speed, &speed_fw);
	if (ret) {
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return ret;
	}

	hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S,
			speed_fw);
	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);
	req->lane_num = lane_num;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	duplex = hclge_check_speed_dup(duplex, speed);
	if (!mac->support_autoneg && mac->speed == speed &&
	    mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
		return 0;

	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num);
	if (ret)
		return ret;

	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
	if (lane_num)
		hdev->hw.mac.lane_num = lane_num;

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex, u8 lane_num)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	if (enable)
		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.support_autoneg) {
		if (enable) {
			dev_err(&hdev->pdev->dev,
				"autoneg is not supported by current port\n");
			return -EOPNOTSUPP;
		} else {
			return 0;
		}
	}

	return hclge_set_autoneg_en(hdev, enable);
}
static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}

static int hclge_restart_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;
	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
		return hclge_set_autoneg_en(hdev, !halt);

	return 0;
}
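
/* FEC lane counters are spread across the command descriptors, each of
 * which carries HCLGE_DESC_DATA_LEN u32 words; the parser below walks
 * desc_index/data_index accordingly.
 */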
static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev,
					struct hclge_desc *desc, u32 desc_len)
{
	u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2;
	u32 desc_index = 0;
	u32 data_index = 0;
	u32 i;

	for (i = 0; i < lane_size; i++) {
		if (data_index >= HCLGE_DESC_DATA_LEN) {
			desc_index++;
			data_index = 0;
		}

		if (desc_index >= desc_len)
			return;

		hdev->fec_stats.per_lanes[i] +=
			le32_to_cpu(desc[desc_index].data[data_index]);
		data_index++;
	}
}

static void hclge_parse_fec_stats(struct hclge_dev *hdev,
				  struct hclge_desc *desc, u32 desc_len)
{
	struct hclge_query_fec_stats_cmd *req;

	req = (struct hclge_query_fec_stats_cmd *)desc[0].data;

	hdev->fec_stats.base_r_lane_num = req->base_r_lane_num;
	hdev->fec_stats.rs_corr_blocks +=
		le32_to_cpu(req->rs_fec_corr_blocks);
	hdev->fec_stats.rs_uncorr_blocks +=
		le32_to_cpu(req->rs_fec_uncorr_blocks);
	hdev->fec_stats.rs_error_blocks +=
		le32_to_cpu(req->rs_fec_error_blocks);
	hdev->fec_stats.base_r_corr_blocks +=
		le32_to_cpu(req->base_r_fec_corr_blocks);
	hdev->fec_stats.base_r_uncorr_blocks +=
		le32_to_cpu(req->base_r_fec_uncorr_blocks);

	hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1);
}
static int hclge_update_fec_stats_hw(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM];
	int ret;
	u32 i;

	for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS,
					   true);
		if (i != (HCLGE_FEC_STATS_CMD_NUM - 1))
			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM);
	if (ret)
		return ret;

	hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM);

	return 0;
}

static void hclge_update_fec_stats(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

	if (!hnae3_ae_dev_fec_stats_supported(ae_dev) ||
	    test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state))
		return;

	ret = hclge_update_fec_stats_hw(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to update fec stats, ret = %d\n", ret);

	clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state);
}
static void hclge_get_fec_stats_total(struct hclge_dev *hdev,
				      struct ethtool_fec_stats *fec_stats)
{
	fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks;
	fec_stats->uncorrectable_blocks.total =
		hdev->fec_stats.rs_uncorr_blocks;
}

static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev,
				      struct ethtool_fec_stats *fec_stats)
{
	u8 i;

	if (hdev->fec_stats.base_r_lane_num == 0 ||
	    hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) {
		dev_err(&hdev->pdev->dev,
			"fec stats lane number(%llu) is invalid\n",
			hdev->fec_stats.base_r_lane_num);
		return;
	}

	for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) {
		fec_stats->corrected_blocks.lanes[i] =
			hdev->fec_stats.base_r_corr_per_lanes[i];
		fec_stats->uncorrectable_blocks.lanes[i] =
			hdev->fec_stats.base_r_uncorr_per_lanes[i];
	}
}

static void hclge_comm_get_fec_stats(struct hclge_dev *hdev,
				     struct ethtool_fec_stats *fec_stats)
{
	u32 fec_mode = hdev->hw.mac.fec_mode;

	switch (fec_mode) {
	case BIT(HNAE3_FEC_RS):
	case BIT(HNAE3_FEC_LLRS):
		hclge_get_fec_stats_total(hdev, fec_stats);
		break;
	case BIT(HNAE3_FEC_BASER):
		hclge_get_fec_stats_lanes(hdev, fec_stats);
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"fec stats is not supported by current fec mode(0x%x)\n",
			fec_mode);
		break;
	}
}

static void hclge_get_fec_stats(struct hnae3_handle *handle,
				struct ethtool_fec_stats *fec_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 fec_mode = hdev->hw.mac.fec_mode;

	if (fec_mode == BIT(HNAE3_FEC_NONE) ||
	    fec_mode == BIT(HNAE3_FEC_AUTO) ||
	    fec_mode == BIT(HNAE3_FEC_USER_DEF))
		return;

	hclge_update_fec_stats(hdev);

	hclge_comm_get_fec_stats(hdev, fec_stats);
}
static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
{
	struct hclge_config_fec_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);

	req = (struct hclge_config_fec_cmd *)desc.data;
	if (fec_mode & BIT(HNAE3_FEC_AUTO))
		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
	if (fec_mode & BIT(HNAE3_FEC_RS))
		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
	if (fec_mode & BIT(HNAE3_FEC_LLRS))
		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS);
	if (fec_mode & BIT(HNAE3_FEC_BASER))
		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);

	return ret;
}

static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	if (fec_mode && !(mac->fec_ability & fec_mode)) {
		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
		return -EINVAL;
	}

	ret = hclge_set_fec_hw(hdev, fec_mode);
	if (ret)
		return ret;

	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
	return 0;
}

static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
			  u8 *fec_mode)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac *mac = &hdev->hw.mac;

	if (fec_ability)
		*fec_ability = mac->fec_ability;
	if (fec_mode)
		*fec_mode = mac->fec_mode;
}
static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	hdev->support_sfp_query = true;
	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
					 hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
	if (ret)
		return ret;

	if (hdev->hw.mac.support_autoneg) {
		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
		if (ret)
			return ret;
	}

	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
		if (ret)
			return ret;
	}

	ret = hclge_set_mac_mtu(hdev, hdev->mps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
		return ret;
	}

	ret = hclge_set_default_loopback(hdev);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"allocate buffer fail, ret=%d\n", ret);

	return ret;
}
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) {
		hdev->last_mbx_scheduled = jiffies;
		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
	}
}

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) {
		hdev->last_rst_scheduled = jiffies;
		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
	}
}

static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}

void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
		mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
}
static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;

	return 0;
}

static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	*link_status = HCLGE_LINK_STATUS_DOWN;

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
		return 0;

	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
		return 0;

	return hclge_get_mac_link_status(hdev, link_status);
}
static void hclge_push_link_status(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int ret;
	u16 i;

	for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];

		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
		    vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
			continue;

		ret = hclge_push_vf_link_status(vport);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to push link status to vf%u, ret = %d\n",
				i, ret);
		}
	}
}

static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_handle *rhandle = &hdev->vport[0].roce;
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hnae3_client *rclient = hdev->roce_client;
	struct hnae3_client *client = hdev->nic_client;
	int state;
	int ret;

	if (!client)
		return;

	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
		return;

	ret = hclge_get_mac_phy_link(hdev, &state);
	if (ret) {
		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
		return;
	}

	if (state != hdev->hw.mac.link) {
		hdev->hw.mac.link = state;
		client->ops->link_status_change(handle, state);
		hclge_config_mac_tnl_int(hdev, state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, state);

		hclge_push_link_status(hdev);
	}

	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
}
static void hclge_update_speed_advertising(struct hclge_mac *mac)
{
	u32 speed_ability;

	if (hclge_get_speed_bit(mac->speed, &speed_ability))
		return;

	switch (mac->module_type) {
	case HNAE3_MODULE_TYPE_FIBRE_LR:
		hclge_convert_setting_lr(speed_ability, mac->advertising);
		break;
	case HNAE3_MODULE_TYPE_FIBRE_SR:
	case HNAE3_MODULE_TYPE_AOC:
		hclge_convert_setting_sr(speed_ability, mac->advertising);
		break;
	case HNAE3_MODULE_TYPE_CR:
		hclge_convert_setting_cr(speed_ability, mac->advertising);
		break;
	case HNAE3_MODULE_TYPE_KR:
		hclge_convert_setting_kr(speed_ability, mac->advertising);
		break;
	default:
		break;
	}
}

static void hclge_update_fec_advertising(struct hclge_mac *mac)
{
	if (mac->fec_mode & BIT(HNAE3_FEC_RS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->advertising);
	else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 mac->advertising);
	else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->advertising);
	else
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 mac->advertising);
}

static void hclge_update_pause_advertising(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	bool rx_en, tx_en;

	switch (hdev->fc_mode_last_time) {
	case HCLGE_FC_RX_PAUSE:
		rx_en = true;
		tx_en = false;
		break;
	case HCLGE_FC_TX_PAUSE:
		rx_en = false;
		tx_en = true;
		break;
	case HCLGE_FC_FULL:
		rx_en = true;
		tx_en = true;
		break;
	default:
		rx_en = false;
		tx_en = false;
		break;
	}

	linkmode_set_pause(mac->advertising, tx_en, rx_en);
}

static void hclge_update_advertising(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	linkmode_zero(mac->advertising);
	hclge_update_speed_advertising(mac);
	hclge_update_fec_advertising(mac);
	hclge_update_pause_advertising(hdev);
}
static void hclge_update_port_capability(struct hclge_dev *hdev,
					 struct hclge_mac *mac)
{
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	/* firmware can not identify back plane type, the media type
	 * read from configuration can help deal it
	 */
	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
		mac->module_type = HNAE3_MODULE_TYPE_KR;
	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
		mac->module_type = HNAE3_MODULE_TYPE_TP;

	if (mac->support_autoneg) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
		linkmode_copy(mac->advertising, mac->supported);
	} else {
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
				   mac->supported);
		hclge_update_advertising(hdev);
	}
}
static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
{
	struct hclge_sfp_info_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
	resp = (struct hclge_sfp_info_cmd *)desc.data;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP do not support get SFP speed %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
		return ret;
	}

	*speed = le32_to_cpu(resp->speed);

	return 0;
}

static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
{
	struct hclge_sfp_info_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
	resp = (struct hclge_sfp_info_cmd *)desc.data;

	resp->query_type = QUERY_ACTIVE_SPEED;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support get SFP info %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
		return ret;
	}

	/* In some case, mac speed get from IMP may be 0, it shouldn't be
	 * set to mac->speed.
	 */
	if (!le32_to_cpu(resp->speed))
		return 0;

	mac->speed = le32_to_cpu(resp->speed);
	/* if resp->speed_ability is 0, it means it's an old version
	 * firmware, do not update these params
	 */
	if (resp->speed_ability) {
		mac->module_type = le32_to_cpu(resp->module_type);
		mac->speed_ability = le32_to_cpu(resp->speed_ability);
		mac->autoneg = resp->autoneg;
		mac->support_autoneg = resp->autoneg_ability;
		mac->speed_type = QUERY_ACTIVE_SPEED;
		mac->lane_num = resp->lane_num;
		if (!resp->active_fec)
			mac->fec_mode = 0;
		else
			mac->fec_mode = BIT(resp->active_fec);
		mac->fec_ability = resp->fec_ability;
	} else {
		mac->speed_type = QUERY_SFP_SPEED;
	}

	return 0;
}
static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
					struct ethtool_link_ksettings *cmd)
{
	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_phy_link_ksetting_0_cmd *req0;
	struct hclge_phy_link_ksetting_1_cmd *req1;
	u32 supported, advertising, lp_advertising;
	struct hclge_dev *hdev = vport->back;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
				   true);
	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
				   true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get phy link ksetting, ret = %d.\n", ret);
		return ret;
	}

	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
	cmd->base.autoneg = req0->autoneg;
	cmd->base.speed = le32_to_cpu(req0->speed);
	cmd->base.duplex = req0->duplex;
	cmd->base.port = req0->port;
	cmd->base.transceiver = req0->transceiver;
	cmd->base.phy_address = req0->phy_address;
	cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
	cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
	supported = le32_to_cpu(req0->supported);
	advertising = le32_to_cpu(req0->advertising);
	lp_advertising = le32_to_cpu(req0->lp_advertising);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
						lp_advertising);

	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
	cmd->base.master_slave_cfg = req1->master_slave_cfg;
	cmd->base.master_slave_state = req1->master_slave_state;

	return 0;
}
static int
hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
			     const struct ethtool_link_ksettings *cmd)
{
	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_phy_link_ksetting_0_cmd *req0;
	struct hclge_phy_link_ksetting_1_cmd *req1;
	struct hclge_dev *hdev = vport->back;
	u32 advertising;
	int ret;

	if (cmd->base.autoneg == AUTONEG_DISABLE &&
	    ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
	     (cmd->base.duplex != DUPLEX_HALF &&
	      cmd->base.duplex != DUPLEX_FULL)))
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
				   false);
	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
				   false);

	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
	req0->autoneg = cmd->base.autoneg;
	req0->speed = cpu_to_le32(cmd->base.speed);
	req0->duplex = cmd->base.duplex;
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);
	req0->advertising = cpu_to_le32(advertising);
	req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;

	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
	req1->master_slave_cfg = cmd->base.master_slave_cfg;

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to set phy link ksettings, ret = %d.\n", ret);
		return ret;
	}

	hdev->hw.mac.autoneg = cmd->base.autoneg;
	hdev->hw.mac.speed = cmd->base.speed;
	hdev->hw.mac.duplex = cmd->base.duplex;
	linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);

	return 0;
}
static int hclge_update_tp_port_info(struct hclge_dev *hdev)
{
	struct ethtool_link_ksettings cmd;
	int ret;

	if (!hnae3_dev_phy_imp_supported(hdev))
		return 0;

	ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
	if (ret)
		return ret;

	hdev->hw.mac.autoneg = cmd.base.autoneg;
	hdev->hw.mac.speed = cmd.base.speed;
	hdev->hw.mac.duplex = cmd.base.duplex;
	linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising);

	return 0;
}

static int hclge_tp_port_init(struct hclge_dev *hdev)
{
	struct ethtool_link_ksettings cmd;

	if (!hnae3_dev_phy_imp_supported(hdev))
		return 0;

	cmd.base.autoneg = hdev->hw.mac.autoneg;
	cmd.base.speed = hdev->hw.mac.speed;
	cmd.base.duplex = hdev->hw.mac.duplex;
	linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);

	return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
}
static int hclge_update_port_info(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int speed;
	int ret;

	/* get the port info from SFP cmd if not copper port */
	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
		return hclge_update_tp_port_info(hdev);

	/* if IMP does not support get SFP/qSFP info, return directly */
	if (!hdev->support_sfp_query)
		return 0;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		speed = mac->speed;
		ret = hclge_get_sfp_info(hdev, mac);
	} else {
		speed = HCLGE_MAC_SPEED_UNKNOWN;
		ret = hclge_get_sfp_speed(hdev, &speed);
	}

	if (ret == -EOPNOTSUPP) {
		hdev->support_sfp_query = false;
		return ret;
	} else if (ret) {
		return ret;
	}

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
			hclge_update_port_capability(hdev, mac);
			if (mac->speed != speed)
				(void)hclge_tm_port_shaper_cfg(hdev);
			return 0;
		}
		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
					       HCLGE_MAC_FULL, mac->lane_num);
	} else {
		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
			return 0; /* do nothing if no SFP */

		/* must config full duplex for SFP */
		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0);
	}
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}
struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
{
	if (!pci_num_vf(hdev->pdev)) {
		dev_err(&hdev->pdev->dev,
			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
		return NULL;
	}

	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
		dev_err(&hdev->pdev->dev,
			"vf id(%d) is out of range(0 <= vfid < %d)\n",
			vf, pci_num_vf(hdev->pdev));
		return NULL;
	}

	/* VF start from 1 in vport */
	vf += HCLGE_VF_VPORT_START_NUM;
	return &hdev->vport[vf];
}

static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
			       struct ifla_vf_info *ivf)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	ivf->vf = vf;
	ivf->linkstate = vport->vf_info.link_state;
	ivf->spoofchk = vport->vf_info.spoofchk;
	ivf->trusted = vport->vf_info.trusted;
	ivf->min_tx_rate = 0;
	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
	ether_addr_copy(ivf->mac, vport->vf_info.mac);

	return 0;
}
static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
				   int link_state)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int link_state_old;
	int ret;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	link_state_old = vport->vf_info.link_state;
	vport->vf_info.link_state = link_state;

	/* return success directly if the VF is unalive, VF will
	 * query link state itself when it starts work.
	 */
	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
		return 0;

	ret = hclge_push_vf_link_status(vport);
	if (ret) {
		vport->vf_info.link_state = link_state_old;
		dev_err(&hdev->pdev->dev,
			"failed to push vf%d link status, ret = %d\n", vf, ret);
	}

	return ret;
}
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	hw_err_src_reg = hclge_read_dev(&hdev->hw,
					HCLGE_RAS_PF_OTHER_INT_STS_REG);

	/* Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only process reset event in this go and will
	 * defer the processing of the mailbox events. Since, we would have not
	 * cleared RX CMDQ event this time we would receive again another
	 * interrupt from H/W just for the mailbox.
	 *
	 * check for vector0 reset event sources
	 */
	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		hdev->rst_stats.imp_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		hdev->rst_stats.global_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* check for vector0 msix event and hardware error event source */
	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
	    hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
		return HCLGE_VECTOR0_EVENT_ERR;

	/* check for vector0 ptp event source */
	if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
		*clearval = msix_src_reg;
		return HCLGE_VECTOR0_EVENT_PTP;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

	/* print other vector0 event source */
	dev_info(&hdev->pdev->dev,
		 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
		 cmdq_src_reg, hw_err_src_reg, msix_src_reg);

	return HCLGE_VECTOR0_EVENT_OTHER;
}
static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
#define HCLGE_IMP_RESET_DELAY		5

	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_PTP:
	case HCLGE_VECTOR0_EVENT_RST:
		if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
			mdelay(HCLGE_IMP_RESET_DELAY);

		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}

static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
{
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}
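
/* Vector 0 is shared by reset, mailbox, PTP and error sources. The
 * handler below masks the vector, dispatches the cause to the matching
 * task, and re-enables the vector only for causes fully handled here,
 * leaving re-enabling to the reset/error handling paths otherwise.
 */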
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	unsigned long flags;
	u32 clearval = 0;
	u32 event_cause;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events. */
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_ERR:
		hclge_errhand_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_reset_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_PTP:
		spin_lock_irqsave(&hdev->ptp->lock, flags);
		hclge_ptp_clean_tx_hwts(hdev);
		spin_unlock_irqrestore(&hdev->ptp->lock, flags);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule mbx task as there are more
		 * mbx messages reported by this interrupt.
		 */
		hclge_mbx_task_schedule(hdev);
		break;
	default:
		dev_warn(&hdev->pdev->dev,
			 "received unknown or unhandled event of vector0\n");
		break;
	}

	hclge_clear_event_cause(hdev, event_cause, clearval);

	/* Enable interrupt if it is not caused by reset event or error event */
	if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
	    event_cause == HCLGE_VECTOR0_EVENT_MBX ||
	    event_cause == HCLGE_VECTOR0_EVENT_OTHER)
		hclge_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}
static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}
static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	/* this would be explicitly freed in the end */
	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
		 HCLGE_NAME, pci_name(hdev->pdev));
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, hdev->misc_vector.name, hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}

static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
{
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclge_free_vector(hdev, 0);
}
int hclge_notify_client(struct hclge_dev *hdev,
			enum hnae3_reset_notify_type type)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hnae3_client *client = hdev->nic_client;
	int ret;

	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static int hclge_notify_roce_client(struct hclge_dev *hdev,
				    enum hnae3_reset_notify_type type)
{
	struct hnae3_handle *handle = &hdev->vport[0].roce;
	struct hnae3_client *client = hdev->roce_client;
	int ret;

	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
			type, ret);

	return ret;
}
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS	100
#define HCLGE_RESET_WAIT_CNT	350

	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_IMP_RESET_BIT;
		break;
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WATI_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}
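
/* Before a PF/FLR reset, FUNC_RST_ING is set on every VF so the VF
 * driver can observe the reset in progress; hclge_func_reset_sync_vf()
 * further below then polls until all VFs report ready (or times out).
 */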
static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
{
	struct hclge_vf_rst_cmd *req;
	struct hclge_desc desc;

	req = (struct hclge_vf_rst_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
	req->dest_vfid = func_id;

	if (reset)
		req->vf_rst = 0x1;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
{
	int i;

	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to set/clear VF's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"set vf(%u) rst failed %d!\n",
				vport->vport_id - HCLGE_VF_VPORT_START_NUM,
				ret);
			return ret;
		}

		if (!reset ||
		    !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state))
			continue;

		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) &&
		    hdev->reset_type == HNAE3_FUNC_RESET) {
			set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET,
				&vport->need_notify);
			continue;
		}

		/* Inform VF to process the reset.
		 * hclge_inform_reset_assert_to_vf may fail if VF
		 * driver is not loaded.
		 */
		ret = hclge_inform_reset_assert_to_vf(vport);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "inform reset to vf(%u) failed %d!\n",
				 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
				 ret);
	}

	return 0;
}
static void hclge_mailbox_service_task(struct hclge_dev *hdev)
{
	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
	    test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) ||
	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
		return;

	if (time_is_before_jiffies(hdev->last_mbx_scheduled +
				   HCLGE_MBX_SCHED_TIMEOUT))
		dev_warn(&hdev->pdev->dev,
			 "mbx service task is scheduled after %ums on cpu%u!\n",
			 jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled),
			 smp_processor_id());

	hclge_mbx_handler(hdev);

	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
{
	struct hclge_pf_rst_sync_cmd *req;
	struct hclge_desc desc;
	int cnt = 0;
	int ret;

	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);

	do {
		/* vf need to down netdev by mbx during PF or FLR reset */
		hclge_mailbox_service_task(hdev);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		/* for compatible with old firmware, wait
		 * 100 ms for VF to stop IO
		 */
		if (ret == -EOPNOTSUPP) {
			msleep(HCLGE_RESET_SYNC_TIME);
			return;
		} else if (ret) {
			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
				 ret);
			return;
		} else if (req->all_vf_ready) {
			return;
		}
		msleep(HCLGE_PF_RESET_SYNC_TIME);
		hclge_comm_cmd_reuse_desc(&desc, true);
	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);

	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
}
void hclge_report_hw_error(struct hclge_dev *hdev,
			   enum hnae3_hw_error_type type)
{
	struct hnae3_client *client = hdev->nic_client;

	if (!client || !client->ops->process_hw_error ||
	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
		return;

	client->ops->process_hw_error(&hdev->vport[0].nic, type);
}

static void hclge_handle_imp_error(struct hclge_dev *hdev)
{
	u32 reg_val;

	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
	}

	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
	}
}
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status =%d\n", ret);

	return ret;
}
static void hclge_do_reset(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	if (hclge_get_hw_reset_stat(handle)) {
		dev_info(&pdev->dev, "hardware reset not finish\n");
		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
		return;
	}

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		dev_info(&pdev->dev, "IMP reset requested\n");
		val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
		hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
		break;
	case HNAE3_GLOBAL_RESET:
		dev_info(&pdev->dev, "global reset requested\n");
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF reset requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}
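
/* Reset levels are strictly ordered: IMP > GLOBAL > FUNC > FLR. Picking a
 * level also clears every lower-priority pending bit, since a bigger reset
 * already covers whatever the smaller ones would have fixed.
 */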
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
	struct hclge_dev *hdev = ae_dev->priv;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_IMP_RESET, addr)) {
		rst_level = HNAE3_IMP_RESET;
		clear_bit(HNAE3_IMP_RESET, addr);
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
		rst_level = HNAE3_GLOBAL_RESET;
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
		rst_level = HNAE3_FUNC_RESET;
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	if (hdev->reset_type != HNAE3_NONE_RESET &&
	    rst_level < hdev->reset_type)
		return HNAE3_NONE_RESET;

	return rst_level;
}
static void hclge_clear_reset_cause(struct hclge_dev *hdev)
{
	u32 clearval = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		break;
	case HNAE3_GLOBAL_RESET:
		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		break;
	default:
		break;
	}

	if (!clearval)
		return;

	/* For revision 0x20, the reset interrupt source
	 * can only be cleared after hardware reset done
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
				clearval);

	hclge_enable_vector(&hdev->misc_vector, true);
}

static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGE_COMM_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY;

	hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
}
static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_set_all_vf_rst(hdev, true);
	if (ret)
		return ret;

	hclge_func_reset_sync_vf(hdev);

	return 0;
}

static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
	u32 reg_val;
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		ret = hclge_func_reset_notify_vf(hdev);
		if (ret)
			return ret;

		ret = hclge_func_reset_cmd(hdev, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"asserting function reset fail %d!\n", ret);
			return ret;
		}

		/* After performing PF reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hclge_comm_cmd_init is called.
		 */
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
		hdev->rst_stats.pf_rst_cnt++;
		break;
	case HNAE3_FLR_RESET:
		ret = hclge_func_reset_notify_vf(hdev);
		if (ret)
			return ret;
		break;
	case HNAE3_IMP_RESET:
		hclge_handle_imp_error(hdev);
		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
		break;
	default:
		break;
	}

	/* inform hardware that preparatory work is done */
	msleep(HCLGE_RESET_SYNC_TIME);
	hclge_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare wait ok\n");

	return ret;
}
static void hclge_show_rst_info(struct hclge_dev *hdev)
{
	char *buf;

	buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
	if (!buf)
		return;

	hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);

	dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);

	kfree(buf);
}

static bool hclge_reset_err_handle(struct hclge_dev *hdev)
{
#define MAX_RESET_FAIL_CNT 5

	if (hdev->reset_pending) {
		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
			 hdev->reset_pending);
		return true;
	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
		   HCLGE_RESET_INT_M) {
		dev_info(&hdev->pdev->dev,
			 "reset failed because new reset interrupt\n");
		hclge_clear_reset_cause(hdev);
		return false;
	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
		hdev->rst_stats.reset_fail_cnt++;
		set_bit(hdev->reset_type, &hdev->reset_pending);
		dev_info(&hdev->pdev->dev,
			 "re-schedule reset task(%u)\n",
			 hdev->rst_stats.reset_fail_cnt);
		return true;
	}

	hclge_clear_reset_cause(hdev);

	/* recover the handshake status when reset fail */
	hclge_reset_handshake(hdev, true);

	dev_err(&hdev->pdev->dev, "Reset fail!\n");

	hclge_show_rst_info(hdev);

	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);

	return false;
}
static void hclge_update_reset_level(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	enum hnae3_reset_type reset_level;

	/* reset request will not be set during reset, so clear
	 * pending reset request to avoid unnecessary reset
	 * caused by the same reason.
	 */
	hclge_get_reset_level(ae_dev, &hdev->reset_request);

	/* if default_reset_request has a higher level reset request,
	 * it should be handled as soon as possible. since some errors
	 * need this kind of reset to fix.
	 */
	reset_level = hclge_get_reset_level(ae_dev,
					    &hdev->default_reset_request);
	if (reset_level != HNAE3_NONE_RESET)
		set_bit(reset_level, &hdev->reset_request);
}

static int hclge_set_rst_done(struct hclge_dev *hdev)
{
	struct hclge_pf_rst_done_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_pf_rst_done_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	/* To be compatible with the old firmware, which does not support
	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
	 * return success
	 */
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "current firmware does not support command(0x%x)!\n",
			 HCLGE_OPC_PF_RST_DONE);
		return 0;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
			ret);
	}

	return ret;
}
static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, false);
		break;
	case HNAE3_GLOBAL_RESET:
	case HNAE3_IMP_RESET:
		ret = hclge_set_rst_done(hdev);
		break;
	default:
		break;
	}

	/* clear up the handshake status after re-initialize done */
	hclge_reset_handshake(hdev, false);

	return ret;
}

static int hclge_reset_stack(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	ret = hclge_reset_ae_dev(hdev->ae_dev);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
}

static int hclge_reset_prepare(struct hclge_dev *hdev)
{
	int ret;

	hdev->rst_stats.reset_cnt++;
	/* perform reset of the stack & ae device for a client */
	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	return hclge_reset_prepare_wait(hdev);
}
static int hclge_reset_rebuild(struct hclge_dev *hdev)
{
	int ret;

	hdev->rst_stats.hw_reset_done_cnt++;

	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	ret = hclge_reset_stack(hdev);
	if (ret)
		return ret;

	hclge_clear_reset_cause(hdev);

	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
	 * times
	 */
	if (ret &&
	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
		return ret;

	ret = hclge_reset_prepare_up(hdev);
	if (ret)
		return ret;

	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		return ret;

	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		return ret;

	hdev->last_reset_time = jiffies;
	hdev->rst_stats.reset_fail_cnt = 0;
	hdev->rst_stats.reset_done_cnt++;
	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);

	hclge_update_reset_level(hdev);

	return 0;
}

static void hclge_reset(struct hclge_dev *hdev)
{
	if (hclge_reset_prepare(hdev))
		goto err_reset;

	if (hclge_reset_wait(hdev))
		goto err_reset;

	if (hclge_reset_rebuild(hdev))
		goto err_reset;

	return;

err_reset:
	if (hclge_reset_err_handle(hdev))
		hclge_reset_task_schedule(hdev);
}
static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclge_dev *hdev = ae_dev->priv;

	/* We might end up getting called broadly because of the two cases
	 * below:
	 * 1. A recoverable error was conveyed through APEI and the only way
	 *    to bring back normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout.
	 *
	 * Check if this is a new reset request and we are not here just
	 * because the last reset attempt did not succeed and the watchdog
	 * hit us again. We will know this if the last reset request did not
	 * occur very recently (watchdog timer = 5*HZ, so check after a
	 * sufficiently large time, say 4*5*HZ). In case of a new request we
	 * reset the "reset level" to PF reset. And if it is a repeat reset
	 * request of the most recent one then we want to make sure we
	 * throttle the reset request. Therefore, we will not allow it again
	 * before 3*HZ has passed.
	 */

	if (time_before(jiffies, (hdev->last_reset_time +
				  HCLGE_RESET_INTERVAL))) {
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
		return;
	}

	if (hdev->default_reset_request) {
		hdev->reset_level =
			hclge_get_reset_level(ae_dev,
					      &hdev->default_reset_request);
	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
		hdev->reset_level = HNAE3_FUNC_RESET;
	}

	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
		 hdev->reset_level);

	/* request reset & schedule reset task */
	set_bit(hdev->reset_level, &hdev->reset_request);
	hclge_reset_task_schedule(hdev);

	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
		hdev->reset_level++;
}

static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}
static void hclge_reset_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);

	/* if default_reset_request has no value, it means that this reset
	 * request has already been handled, so just return here
	 */
	if (!hdev->default_reset_request)
		return;

	dev_info(&hdev->pdev->dev,
		 "triggering reset in reset timer\n");
	hclge_reset_event(hdev->pdev, NULL);
}

static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully reset then, we can proceed with driver, client
	 *       reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	hdev->last_reset_time = jiffies;
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}
static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	enum hnae3_reset_type reset_type;

	if (ae_dev->hw_err_reset_req) {
		reset_type = hclge_get_reset_level(ae_dev,
						   &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_type);
	}

	if (hdev->default_reset_request && ae_dev->ops->reset_event)
		ae_dev->ops->reset_event(hdev->pdev, NULL);

	/* enable interrupt after error handling complete */
	hclge_enable_vector(&hdev->misc_vector, true);
}

static void hclge_handle_err_recovery(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->hw_err_reset_req = 0;

	if (hclge_find_error_source(hdev)) {
		hclge_handle_error_info_log(ae_dev);
		hclge_handle_mac_tnl(hdev);
		hclge_handle_vf_queue_err_ras(hdev);
	}

	hclge_handle_err_reset_request(hdev);
}

static void hclge_misc_err_recovery(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct device *dev = &hdev->pdev->dev;
	u32 msix_sts_reg;

	msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
		if (hclge_handle_hw_msix_error
				(hdev, &hdev->default_reset_request))
			dev_info(dev, "received msix interrupt 0x%x\n",
				 msix_sts_reg);
	} else {
		hclge_handle_hw_ras_error(ae_dev);
	}

	hclge_handle_err_reset_request(hdev);
}
static void hclge_errhand_service_task(struct hclge_dev *hdev)
{
	if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
		return;

	if (hnae3_dev_ras_imp_supported(hdev))
		hclge_handle_err_recovery(hdev);
	else
		hclge_misc_err_recovery(hdev);
}

static void hclge_reset_service_task(struct hclge_dev *hdev)
{
	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		return;

	if (time_is_before_jiffies(hdev->last_rst_scheduled +
				   HCLGE_RESET_SCHED_TIMEOUT))
		dev_warn(&hdev->pdev->dev,
			 "reset service task is scheduled after %ums on cpu%u!\n",
			 jiffies_to_msecs(jiffies - hdev->last_rst_scheduled),
			 smp_processor_id());

	down(&hdev->reset_sem);
	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}
static void hclge_update_vport_alive(struct hclge_dev *hdev)
{
#define HCLGE_ALIVE_SECONDS_NORMAL 8

	unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ;
	int i;

	/* start from vport 1 for PF is always alive */
	for (i = 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) ||
		    !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			continue;
		if (time_after(jiffies, vport->last_active_jiffies +
			       alive_time)) {
			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
			dev_warn(&hdev->pdev->dev,
				 "VF %u heartbeat timeout\n",
				 i - HCLGE_VF_VPORT_START_NUM);
		}
	}
}
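
/* Runs the slow-path bookkeeping (link, MAC/VLAN/FD sync, stats). The
 * heavier work is throttled to roughly once per second: if the task ran
 * less than HZ jiffies ago it only does the mbx-triggered link update and
 * reschedules itself for the remainder of the second.
 */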
static void hclge_periodic_service_task(struct hclge_dev *hdev)
{
	unsigned long delta = round_jiffies_relative(HZ);

	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
		return;

	/* Always handle the link updating to make sure link state is
	 * updated when it is triggered by mbx.
	 */
	hclge_update_link_status(hdev);
	hclge_sync_mac_table(hdev);
	hclge_sync_promisc_mode(hdev);
	hclge_sync_fd_table(hdev);

	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
		delta = jiffies - hdev->last_serv_processed;

		if (delta < round_jiffies_relative(HZ)) {
			delta = round_jiffies_relative(HZ) - delta;
			goto out;
		}
	}

	hdev->serv_processed_cnt++;
	hclge_update_vport_alive(hdev);

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
		hdev->last_serv_processed = jiffies;
		goto out;
	}

	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
		hclge_update_stats_for_all(hdev);

	hclge_update_port_info(hdev);
	hclge_sync_vlan_filter(hdev);

	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
		hclge_rfs_filter_expire(hdev);

	hdev->last_serv_processed = jiffies;

out:
	hclge_task_schedule(hdev, delta);
}
static void hclge_ptp_service_task(struct hclge_dev *hdev)
{
	unsigned long flags;

	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
	    !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
	    !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
		return;

	/* to prevent concurrence with the irq handler */
	spin_lock_irqsave(&hdev->ptp->lock, flags);

	/* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
	 * handler may handle it just before spin_lock_irqsave().
	 */
	if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
		hclge_ptp_clean_tx_hwts(hdev);

	spin_unlock_irqrestore(&hdev->ptp->lock, flags);
}
static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task.work);

	hclge_errhand_service_task(hdev);
	hclge_reset_service_task(hdev);
	hclge_ptp_service_task(hdev);
	hclge_mailbox_service_task(hdev);
	hclge_periodic_service_task(hdev);

	/* Handle error recovery, reset and mbx again in case the periodic
	 * task delays the handling by calling hclge_task_schedule() in
	 * hclge_periodic_service_task().
	 */
	hclge_errhand_service_task(hdev);
	hclge_reset_service_task(hdev);
	hclge_mailbox_service_task(hdev);
}
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}
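
/* Vectors with idx - 1 below 64 use the legacy register window at
 * HCLGE_VECTOR_REG_BASE with a flat HCLGE_VECTOR_REG_OFFSET stride.
 * Higher vectors live in the extended window: (idx - 1) is split into a
 * bank (/ 64, stride HCLGE_VECTOR_REG_OFFSET_H) and a slot within the
 * bank (% 64, stride HCLGE_VECTOR_REG_OFFSET). For example, idx = 70
 * maps to bank 1, slot 5 of the extended window.
 */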
static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
				  struct hnae3_vector_info *vector_info)
{
#define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64

	vector_info->vector = pci_irq_vector(hdev->pdev, idx);

	/* need an extended offset to configure vectors >= 64 */
	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
		vector_info->io_addr = hdev->hw.hw.io_base +
				HCLGE_VECTOR_REG_BASE +
				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
	else
		vector_info->io_addr = hdev->hw.hw.io_base +
				HCLGE_VECTOR_EXT_REG_BASE +
				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
				HCLGE_VECTOR_REG_OFFSET_H +
				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
				HCLGE_VECTOR_REG_OFFSET;

	hdev->vector_status[idx] = hdev->vport[0].vport_id;
	hdev->vector_irq[idx] = vector_info->vector;
}
static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	u16 i = 0;
	u16 j;

	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		while (++i < hdev->num_nic_msi) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				hclge_get_vector_info(hdev, i, vector);
				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}
static int hclge_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector = %d\n", vector);
		return vector_id;
	}

	hclge_free_vector(hdev, vector_id);

	return 0;
}
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg;

	hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);

	hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
				     ae_dev->dev_specs.rss_ind_tbl_size);

	return 0;
}

static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc);
	if (ret) {
		dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
		return ret;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw,
					      rss_cfg->rss_indirection_tbl);
}
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
				       &hdev->rss_cfg, nfc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to set rss tuple, ret = %d.\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u8 tuple_sets;
	int ret;

	nfc->data = 0;

	ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type,
				       &tuple_sets);
	if (ret || !tuple_sets)
		return ret;

	nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);

	return 0;
}

static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->pf_rss_size_max;
}
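
/* The hardware takes the per-TC queue range as a power of two, so tc_size
 * is the log2 of rss_size rounded up: e.g. a TC with 10 queues yields
 * roundup_pow_of_two(10) = 16 and tc_size = ilog2(16) = 4. rss_size itself
 * must still fit within the RSS indirection table.
 */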
static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	struct hclge_vport *vport = hdev->vport;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	struct hnae3_tc_info *tc_info;
	u16 roundup_size;
	u16 rss_size;
	int i;

	tc_info = &vport->nic.kinfo.tc_info;
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		rss_size = tc_info->tqp_count[i];

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		/* tc_size set to hardware is the log2 of roundup power of two
		 * of rss_size, the actual queue size is limited by indirection
		 * table.
		 */
		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
		    rss_size == 0) {
			dev_err(&hdev->pdev->dev,
				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
				rss_size);
			return -EINVAL;
		}

		roundup_size = roundup_pow_of_two(rss_size);
		roundup_size = ilog2(roundup_size);

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = tc_info->tqp_offset[i];
	}

	return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
					  tc_size);
}
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl;
	u8 *key = hdev->rss_cfg.rss_hash_key;
	u8 hfunc = hdev->rss_cfg.rss_algo;
	int ret;

	ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
					     rss_indir);
	if (ret)
		return ret;

	ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, &hdev->rss_cfg);
	if (ret)
		return ret;

	return hclge_init_rss_tc_mode(hdev);
}
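
/* A ring chain may hold more entries than one descriptor can carry, so the
 * mapping is flushed to firmware in chunks of HCLGE_VECTOR_ELEMENTS_PER_CMD
 * rings; the descriptor is then re-initialized and any remainder is sent
 * in a final partial command.
 */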
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req =
		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_comm_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id_l = hnae3_get_field(vector_id,
					       HCLGE_VECTOR_ID_L_M,
					       HCLGE_VECTOR_ID_L_S);
	req->int_vector_id_h = hnae3_get_field(vector_id,
					       HCLGE_VECTOR_ID_H_M,
					       HCLGE_VECTOR_ID_H_S);

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
				HCLGE_INT_TYPE_S,
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
				HCLGE_TQP_ID_S, node->tqp_index);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
				HCLGE_INT_GL_IDX_S,
				hnae3_get_field(node->int_gl_idx,
						HNAE3_RING_GL_IDX_M,
						HNAE3_RING_GL_IDX_S));
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   op,
						   false);
			req->int_vector_id_l =
				hnae3_get_field(vector_id,
						HCLGE_VECTOR_ID_L_M,
						HCLGE_VECTOR_ID_L_S);
			req->int_vector_id_h =
				hnae3_get_field(vector_id,
						HCLGE_VECTOR_ID_H_M,
						HCLGE_VECTOR_ID_H_S);
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}
static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"failed to get vector index. vector=%d\n", vector);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}

static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
			vector_id, ret);

	return ret;
}
static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
				      bool en_uc, bool en_mc, bool en_bc)
{
	struct hclge_vport *vport = &hdev->vport[vf_id];
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	bool uc_tx_en = en_uc;
	u8 promisc_cfg = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = vf_id;

	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
		uc_tx_en = false;

	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
	req->extend_promisc = promisc_cfg;

	/* to be compatible with DEVICE_VERSION_V1/2 */
	promisc_cfg = 0;
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
	req->promisc = promisc_cfg;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set vport %u promisc mode, ret = %d.\n",
			vf_id, ret);

	return ret;
}

int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
				 bool en_mc_pmc, bool en_bc_pmc)
{
	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
}
static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				  bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool en_bc_pmc = true;

	/* For devices whose version is below V2, if broadcast promisc is
	 * enabled, the vlan filter is always bypassed. So broadcast promisc
	 * should stay disabled until the user enables promisc mode.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;

	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
}
static void hclge_sync_fd_state(struct hclge_dev *hdev)
{
	if (hlist_empty(&hdev->fd_rule_list))
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
}

static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
{
	if (!test_bit(location, hdev->fd_bmap)) {
		set_bit(location, hdev->fd_bmap);
		hdev->hclge_fd_rule_num++;
	}
}

static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
{
	if (test_bit(location, hdev->fd_bmap)) {
		clear_bit(location, hdev->fd_bmap);
		hdev->hclge_fd_rule_num--;
	}
}

static void hclge_fd_free_node(struct hclge_dev *hdev,
			       struct hclge_fd_rule *rule)
{
	hlist_del(&rule->rule_node);
	kfree(rule);
	hclge_sync_fd_state(hdev);
}
static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
				      struct hclge_fd_rule *old_rule,
				      struct hclge_fd_rule *new_rule,
				      enum HCLGE_FD_NODE_STATE state)
{
	switch (state) {
	case HCLGE_FD_TO_ADD:
	case HCLGE_FD_ACTIVE:
		/* 1) if the new state is TO_ADD, just replace the old rule
		 * with the same location, no matter its state, because the
		 * new rule will be configured to the hardware.
		 * 2) if the new state is ACTIVE, it means the new rule
		 * has been configured to the hardware, so just replace
		 * the old rule node with the same location.
		 * 3) this doesn't add a new node to the list, so it's
		 * unnecessary to update the rule number and fd_bmap.
		 */
		new_rule->rule_node.next = old_rule->rule_node.next;
		new_rule->rule_node.pprev = old_rule->rule_node.pprev;
		memcpy(old_rule, new_rule, sizeof(*old_rule));
		kfree(new_rule);
		break;
	case HCLGE_FD_DELETED:
		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
		hclge_fd_free_node(hdev, old_rule);
		break;
	case HCLGE_FD_TO_DEL:
		/* if new request is TO_DEL, and old rule is existent
		 * 1) the state of old rule is TO_DEL, we need do nothing,
		 * because we delete rule by location, other rule content
		 * is unnecessary.
		 * 2) the state of old rule is ACTIVE, we need to change its
		 * state to TO_DEL, so the rule will be deleted when the
		 * periodic task is scheduled.
		 * 3) the state of old rule is TO_ADD, it means the rule hasn't
		 * been added to hardware, so we just delete the rule node from
		 * fd_rule_list directly.
		 */
		if (old_rule->state == HCLGE_FD_TO_ADD) {
			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
			hclge_fd_free_node(hdev, old_rule);
			return;
		}
		old_rule->state = HCLGE_FD_TO_DEL;
		break;
	}
}
static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
						u16 location,
						struct hclge_fd_rule **parent)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
		if (rule->location == location)
			return rule;
		else if (rule->location > location)
			return NULL;
		/* record the parent node, used to keep the nodes in
		 * fd_rule_list in ascending order.
		 */
		*parent = rule;
	}

	return NULL;
}

/* insert fd rule node in ascending order according to rule->location */
static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
				      struct hclge_fd_rule *rule,
				      struct hclge_fd_rule *parent)
{
	INIT_HLIST_NODE(&rule->rule_node);

	if (parent)
		hlist_add_behind(&rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&rule->rule_node, hlist);
}
static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
				     struct hclge_fd_user_def_cfg *cfg)
{
	struct hclge_fd_user_def_cfg_cmd *req;
	struct hclge_desc desc;
	u16 data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);

	req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;

	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
			HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
	req->ol2_cfg = cpu_to_le16(data);

	data = 0;
	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
			HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
	req->ol3_cfg = cpu_to_le16(data);

	data = 0;
	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
			HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
	req->ol4_cfg = cpu_to_le16(data);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set fd user def data, ret= %d\n", ret);
	return ret;
}
static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
{
	int ret;

	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
		return;

	if (!locked)
		spin_lock_bh(&hdev->fd_rule_lock);

	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
	if (ret)
		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);

	if (!locked)
		spin_unlock_bh(&hdev->fd_rule_lock);
}
static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
					  struct hclge_fd_rule *rule)
{
	struct hlist_head *hlist = &hdev->fd_rule_list;
	struct hclge_fd_rule *fd_rule, *parent = NULL;
	struct hclge_fd_user_def_info *info, *old_info;
	struct hclge_fd_user_def_cfg *cfg;

	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
		return 0;

	/* valid layers start from 1, so minus 1 to index the cfg array */
	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
	info = &rule->ep.user_def;

	if (!cfg->ref_cnt || cfg->offset == info->offset)
		return 0;

	if (cfg->ref_cnt > 1)
		goto error;

	fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
	if (fd_rule) {
		old_info = &fd_rule->ep.user_def;
		if (info->layer == old_info->layer)
			return 0;
	}

error:
	dev_err(&hdev->pdev->dev,
		"No available offset for layer%d fd rule, each layer only supports one user def offset.\n",
		info->layer + 1);
	return -ENOSPC;
}
static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
					 struct hclge_fd_rule *rule)
{
	struct hclge_fd_user_def_cfg *cfg;

	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
		return;

	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
	if (!cfg->ref_cnt) {
		cfg->offset = rule->ep.user_def.offset;
		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
	}
	cfg->ref_cnt++;
}

static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
					 struct hclge_fd_rule *rule)
{
	struct hclge_fd_user_def_cfg *cfg;

	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
		return;

	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
	if (!cfg->ref_cnt)
		return;

	cfg->ref_cnt--;
	if (!cfg->ref_cnt) {
		cfg->offset = 0;
		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
	}
}
static void hclge_update_fd_list(struct hclge_dev *hdev,
				 enum HCLGE_FD_NODE_STATE state, u16 location,
				 struct hclge_fd_rule *new_rule)
{
	struct hlist_head *hlist = &hdev->fd_rule_list;
	struct hclge_fd_rule *fd_rule, *parent = NULL;

	fd_rule = hclge_find_fd_rule(hlist, location, &parent);
	if (fd_rule) {
		hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
		if (state == HCLGE_FD_ACTIVE)
			hclge_fd_inc_user_def_refcnt(hdev, new_rule);
		hclge_sync_fd_user_def_cfg(hdev, true);

		hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
		return;
	}

	/* it's unlikely to fail here, because we have checked the rule
	 * exists before.
	 */
	if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
		dev_warn(&hdev->pdev->dev,
			 "failed to delete fd rule %u, it's inexistent\n",
			 location);
		return;
	}

	hclge_fd_inc_user_def_refcnt(hdev, new_rule);
	hclge_sync_fd_user_def_cfg(hdev, true);

	hclge_fd_insert_rule_node(hlist, new_rule, parent);
	hclge_fd_inc_rule_cnt(hdev, new_rule->location);

	if (state == HCLGE_FD_TO_ADD) {
		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
		hclge_task_schedule(hdev, 0);
	}
}
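
/* The flow director setup below queries the firmware in two steps:
 * hclge_get_fd_mode() for the TCAM geometry (key width vs. depth) and
 * hclge_get_fd_allocation() for the per-stage entry/counter budget; both
 * feed hclge_init_fd_config().
 */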
static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
	struct hclge_get_fd_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);

	req = (struct hclge_get_fd_mode_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
		return ret;
	}

	*fd_mode = req->mode;

	return ret;
}

static int hclge_get_fd_allocation(struct hclge_dev *hdev,
				   u32 *stage1_entry_num,
				   u32 *stage2_entry_num,
				   u16 *stage1_counter_num,
				   u16 *stage2_counter_num)
{
	struct hclge_get_fd_allocation_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);

	req = (struct hclge_get_fd_allocation_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
			ret);
		return ret;
	}

	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);

	return ret;
}
static int hclge_set_fd_key_config(struct hclge_dev *hdev,
				   enum HCLGE_FD_STAGE stage_num)
{
	struct hclge_set_fd_key_config_cmd *req;
	struct hclge_fd_key_cfg *stage;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);

	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
	stage = &hdev->fd_cfg.key_cfg[stage_num];
	req->stage = stage_num;
	req->key_select = stage->key_sel;
	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);

	return ret;
}
static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
{
	struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;

	spin_lock_bh(&hdev->fd_rule_lock);
	memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
	spin_unlock_bh(&hdev->fd_rule_lock);

	hclge_fd_set_user_def_cmd(hdev, cfg);
}
static int hclge_init_fd_config(struct hclge_dev *hdev)
{
#define LOW_2_WORDS		0x03
	struct hclge_fd_key_cfg *key_cfg;
	int ret;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return 0;

	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
	if (ret)
		return ret;

	switch (hdev->fd_cfg.fd_mode) {
	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
		break;
	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Unsupported flow director mode %u\n",
			hdev->fd_cfg.fd_mode);
		return -EOPNOTSUPP;
	}

	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
	key_cfg->outer_sipv6_word_en = 0;
	key_cfg->outer_dipv6_word_en = 0;

	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* If use max 400bit key, we can support tuples for ether type */
	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
		key_cfg->tuple_active |=
				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
			key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
	}

	/* roce_type is used to filter roce frames
	 * dst_vport is used to specify the rule
	 */
	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);

	ret = hclge_get_fd_allocation(hdev,
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
	if (ret)
		return ret;

	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
}
static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
				int loc, u8 *key, bool is_add)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
	desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
	req1->index = cpu_to_le32(loc);
	req1->entry_vld = sel_x ? is_add : 0;

	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n",
			ret);

	return ret;
}
static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
			      struct hclge_fd_ad_data *action)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);

	req = (struct hclge_fd_ad_config_cmd *)desc.data;
	req->index = cpu_to_le32(loc);
	req->stage = stage;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
		      action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);
	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
			      action->override_tc);
		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
	}
	ad_data <<= 32;
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
		      action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->counter_id);

	req->ad_data = cpu_to_le64(ad_data);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);

	return ret;
}
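
/* Each tuple is emitted as a TCAM (x, y) pair. Assuming the usual
 * convention of the calc_x()/calc_y() helpers defined earlier in this
 * file, a value bit with mask 1 becomes (x, y) = (~v, v), while a
 * masked-off bit becomes (0, 0), which the TCAM treats as "don't care".
 * MAC tuples are additionally byte-reversed into network order.
 */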
static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
				   struct hclge_fd_rule *rule)
{
	int offset, moffset, ip_offset;
	enum HCLGE_FD_KEY_OPT key_opt;
	u16 tmp_x_s, tmp_y_s;
	u32 tmp_x_l, tmp_y_l;
	u8 *p = (u8 *)rule;
	int i;

	if (rule->unused_tuple & BIT(tuple_bit))
		return true;

	key_opt = tuple_key_info[tuple_bit].key_opt;
	offset = tuple_key_info[tuple_bit].offset;
	moffset = tuple_key_info[tuple_bit].moffset;

	switch (key_opt) {
	case KEY_OPT_U8:
		calc_x(*key_x, p[offset], p[moffset]);
		calc_y(*key_y, p[offset], p[moffset]);

		return true;
	case KEY_OPT_LE16:
		calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
		calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case KEY_OPT_LE32:
		calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
		calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case KEY_OPT_MAC:
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
			       p[moffset + i]);
			calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
			       p[moffset + i]);
		}

		return true;
	case KEY_OPT_IP:
		ip_offset = IPV4_INDEX * sizeof(u32);
		calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
		       *(u32 *)(&p[moffset + ip_offset]));
		calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
		       *(u32 *)(&p[moffset + ip_offset]));
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	default:
		return false;
	}
}
static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
				 u8 vf_id, u8 network_port_id)
{
	u32 port_number = 0;

	if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
	} else {
		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
				HCLGE_NETWORK_PORT_ID_S, network_port_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
	}

	return port_number;
}
static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
				       __le32 *key_x, __le32 *key_y,
				       struct hclge_fd_rule *rule)
{
	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	int i;

	for (i = 0; i < MAX_META_DATA; i++) {
		tuple_size = meta_data_key_info[i].key_length;
		tuple_bit = key_cfg->meta_data_active & BIT(i);

		switch (tuple_bit) {
		case BIT(ROCE_TYPE):
			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
		case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size, cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
			break;
		default:
			break;
		}
	}

	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
	shift_bits = sizeof(meta_data) * 8 - cur_pos;

	*key_x = cpu_to_le32(tmp_x << shift_bits);
	*key_y = cpu_to_le32(tmp_y << shift_bits);
}
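
/* Illustrative layout (assuming the 400-bit key mode): the key buffer is
 * max_key_length / 8 bytes; tuple data grows from byte 0 upward and the
 * MAX_META_DATA_LENGTH / 8 bytes of meta data sit at the top of the
 * buffer, as computed via meta_data_region below.
 */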
/* A complete key is combined with meta data key and tuple key.
 * Meta data key is stored at the MSB region, and tuple key is stored at
 * the LSB region, unused bits will be filled 0.
 */
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
			    struct hclge_fd_rule *rule)
{
	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
	u8 *cur_key_x, *cur_key_y;
	u8 meta_data_region;
	u8 tuple_size;
	int ret;
	u32 i;

	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
	cur_key_x = key_x;
	cur_key_y = key_y;

	for (i = 0; i < MAX_TUPLE; i++) {
		bool tuple_valid;

		tuple_size = tuple_key_info[i].key_length / 8;
		if (!(key_cfg->tuple_active & BIT(i)))
			continue;

		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
						     cur_key_y, rule);
		if (tuple_valid) {
			cur_key_x += tuple_size;
			cur_key_y += tuple_size;
		}
	}

	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
			MAX_META_DATA_LENGTH / 8;

	hclge_fd_convert_meta_data(key_cfg,
				   (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);

	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%u, ret=%d\n",
			rule->queue_id, ret);
		return ret;
	}

	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%u, ret=%d\n",
			rule->queue_id, ret);
	return ret;
}
static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
			       struct hclge_fd_rule *rule)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_fd_ad_data ad_data;

	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
	ad_data.ad_id = rule->location;

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		ad_data.drop_packet = true;
	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
		ad_data.override_tc = true;
		ad_data.queue_id =
			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
		ad_data.tc_size =
			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
	} else {
		ad_data.forward_to_direct_queue = true;
		ad_data.queue_id = rule->queue_id;
	}

	if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
		ad_data.use_counter = true;
		ad_data.counter_id = rule->vf_id %
				     hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
	} else {
		ad_data.use_counter = false;
		ad_data.counter_id = 0;
	}

	ad_data.use_next_stage = false;
	ad_data.next_input_key = 0;

	ad_data.write_rule_id_to_bd = true;
	ad_data.rule_id = rule->location;

	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}
static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
				       u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

	if (!spec->ip4src)
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (!spec->ip4dst)
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->psrc)
		*unused_tuple |= BIT(INNER_SRC_PORT);

	if (!spec->pdst)
		*unused_tuple |= BIT(INNER_DST_PORT);

	if (!spec->tos)
		*unused_tuple |= BIT(INNER_IP_TOS);

	return 0;
}

static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
				    u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	if (!spec->ip4src)
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (!spec->ip4dst)
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->tos)
		*unused_tuple |= BIT(INNER_IP_TOS);

	if (!spec->proto)
		*unused_tuple |= BIT(INNER_IP_PROTO);

	if (spec->l4_4_bytes)
		return -EOPNOTSUPP;

	if (spec->ip_ver != ETH_RX_NFC_IP4)
		return -EOPNOTSUPP;

	return 0;
}

static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
				       u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

	/* check whether src/dst ip address used */
	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->psrc)
		*unused_tuple |= BIT(INNER_SRC_PORT);

	if (!spec->pdst)
		*unused_tuple |= BIT(INNER_DST_PORT);

	if (!spec->tclass)
		*unused_tuple |= BIT(INNER_IP_TOS);

	return 0;
}

static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
				    u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* check whether src/dst ip address used */
	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->l4_proto)
		*unused_tuple |= BIT(INNER_IP_PROTO);

	if (!spec->tclass)
		*unused_tuple |= BIT(INNER_IP_TOS);

	if (spec->l4_4_bytes)
		return -EOPNOTSUPP;

	return 0;
}

static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);

	if (is_zero_ether_addr(spec->h_source))
		*unused_tuple |= BIT(INNER_SRC_MAC);

	if (is_zero_ether_addr(spec->h_dest))
		*unused_tuple |= BIT(INNER_DST_MAC);

	if (!spec->h_proto)
		*unused_tuple |= BIT(INNER_ETH_TYPE);

	return 0;
}
static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
				    struct ethtool_rx_flow_spec *fs,
				    u32 *unused_tuple)
{
	if (fs->flow_type & FLOW_EXT) {
		if (fs->h_ext.vlan_etype) {
			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
			return -EOPNOTSUPP;
		}

		if (!fs->h_ext.vlan_tci)
			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);

		if (fs->m_ext.vlan_tci &&
		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
			dev_err(&hdev->pdev->dev,
				"failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
			return -EINVAL;
		}
	} else {
		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		if (hdev->fd_cfg.fd_mode !=
		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
			dev_err(&hdev->pdev->dev,
				"FLOW_MAC_EXT is not supported in current fd mode!\n");
			return -EOPNOTSUPP;
		}

		if (is_zero_ether_addr(fs->h_ext.h_dest))
			*unused_tuple |= BIT(INNER_DST_MAC);
		else
			*unused_tuple &= ~BIT(INNER_DST_MAC);
	}

	return 0;
}
static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
				       struct hclge_fd_user_def_info *info)
{
	switch (flow_type) {
	case ETHER_FLOW:
		info->layer = HCLGE_FD_USER_DEF_L2;
		*unused_tuple &= ~BIT(INNER_L2_RSV);
		break;
	case IP_USER_FLOW:
	case IPV6_USER_FLOW:
		info->layer = HCLGE_FD_USER_DEF_L3;
		*unused_tuple &= ~BIT(INNER_L3_RSV);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		info->layer = HCLGE_FD_USER_DEF_L4;
		*unused_tuple &= ~BIT(INNER_L4_RSV);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
{
	return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
}
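
/* Illustrative (not from this file): with ethtool's 64-bit user-def value,
 * e.g. "ethtool -N eth0 flow-type tcp4 ... user-def 0x120000abcd", the
 * driver would see offset 0x12 in h_ext.data[0] and match data 0xabcd in
 * h_ext.data[1], per the bit layout described in the comment below.
 */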
static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
					 struct ethtool_rx_flow_spec *fs,
					 u32 *unused_tuple,
					 struct hclge_fd_user_def_info *info)
{
	u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
	u16 data, offset, data_mask, offset_mask;
	int ret;

	info->layer = HCLGE_FD_USER_DEF_NONE;
	*unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;

	if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
		return 0;

	/* user-def data from ethtool is 64 bit value, the bit0~15 is used
	 * for data, and bit32~47 is used for offset.
	 */
	data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
	data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
	offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
	offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;

	if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
		return -EOPNOTSUPP;
	}

	if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
		dev_err(&hdev->pdev->dev,
			"user-def offset[%u] should be no more than %u\n",
			offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
		return -EINVAL;
	}

	if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
		dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
		return -EINVAL;
	}

	ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"unsupported flow type for user-def bytes, ret = %d\n",
			ret);
		return ret;
	}

	info->data = data;
	info->data_mask = data_mask;
	info->offset = offset;

	return 0;
}
6082 static int hclge_fd_check_spec(struct hclge_dev
*hdev
,
6083 struct ethtool_rx_flow_spec
*fs
,
6085 struct hclge_fd_user_def_info
*info
)
6090 if (fs
->location
>= hdev
->fd_cfg
.rule_num
[HCLGE_FD_STAGE_1
]) {
6091 dev_err(&hdev
->pdev
->dev
,
6092 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
6094 hdev
->fd_cfg
.rule_num
[HCLGE_FD_STAGE_1
] - 1);
6098 ret
= hclge_fd_parse_user_def_field(hdev
, fs
, unused_tuple
, info
);
6102 flow_type
= fs
->flow_type
& ~(FLOW_EXT
| FLOW_MAC_EXT
);
6103 switch (flow_type
) {
6107 ret
= hclge_fd_check_tcpip4_tuple(&fs
->h_u
.tcp_ip4_spec
,
6111 ret
= hclge_fd_check_ip4_tuple(&fs
->h_u
.usr_ip4_spec
,
6117 ret
= hclge_fd_check_tcpip6_tuple(&fs
->h_u
.tcp_ip6_spec
,
6120 case IPV6_USER_FLOW
:
6121 ret
= hclge_fd_check_ip6_tuple(&fs
->h_u
.usr_ip6_spec
,
6125 if (hdev
->fd_cfg
.fd_mode
!=
6126 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1
) {
6127 dev_err(&hdev
->pdev
->dev
,
6128 "ETHER_FLOW is not supported in current fd mode!\n");
6132 ret
= hclge_fd_check_ether_tuple(&fs
->h_u
.ether_spec
,
6136 dev_err(&hdev
->pdev
->dev
,
6137 "unsupported protocol type, protocol type = %#x\n",
6143 dev_err(&hdev
->pdev
->dev
,
6144 "failed to check flow union tuple, ret = %d\n",
6149 return hclge_fd_check_ext_tuple(hdev
, fs
, unused_tuple
);
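/* The hclge_fd_get_*_tuple() helpers below convert one ethtool flow spec
 * (value plus mask) into the driver's hclge_fd_rule tuple representation,
 * byte-swapping network-order fields into CPU order on the way.
 */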
static void hclge_fd_get_tcpip4_tuple(struct ethtool_rx_flow_spec *fs,
				      struct hclge_fd_rule *rule, u8 ip_proto)
{
	rule->tuples.src_ip[IPV4_INDEX] =
			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
	rule->tuples_mask.src_ip[IPV4_INDEX] =
			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);

	rule->tuples.dst_ip[IPV4_INDEX] =
			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
	rule->tuples_mask.dst_ip[IPV4_INDEX] =
			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);

	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);

	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);

	rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;

	rule->tuples.ether_proto = ETH_P_IP;
	rule->tuples_mask.ether_proto = 0xFFFF;

	rule->tuples.ip_proto = ip_proto;
	rule->tuples_mask.ip_proto = 0xFF;
}

static void hclge_fd_get_ip4_tuple(struct ethtool_rx_flow_spec *fs,
				   struct hclge_fd_rule *rule)
{
	rule->tuples.src_ip[IPV4_INDEX] =
			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
	rule->tuples_mask.src_ip[IPV4_INDEX] =
			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);

	rule->tuples.dst_ip[IPV4_INDEX] =
			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
	rule->tuples_mask.dst_ip[IPV4_INDEX] =
			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);

	rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
	rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;

	rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
	rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;

	rule->tuples.ether_proto = ETH_P_IP;
	rule->tuples_mask.ether_proto = 0xFFFF;
}

static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs,
				      struct hclge_fd_rule *rule, u8 ip_proto)
{
	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
			  IPV6_SIZE);
	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
			  IPV6_SIZE);

	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
			  IPV6_SIZE);
	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
			  IPV6_SIZE);

	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);

	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);

	rule->tuples.ether_proto = ETH_P_IPV6;
	rule->tuples_mask.ether_proto = 0xFFFF;

	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;

	rule->tuples.ip_proto = ip_proto;
	rule->tuples_mask.ip_proto = 0xFF;
}

static void hclge_fd_get_ip6_tuple(struct ethtool_rx_flow_spec *fs,
				   struct hclge_fd_rule *rule)
{
	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
			  IPV6_SIZE);
	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
			  IPV6_SIZE);

	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
			  IPV6_SIZE);
	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
			  IPV6_SIZE);

	rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
	rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;

	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;

	rule->tuples.ether_proto = ETH_P_IPV6;
	rule->tuples_mask.ether_proto = 0xFFFF;
}

static void hclge_fd_get_ether_tuple(struct ethtool_rx_flow_spec *fs,
				     struct hclge_fd_rule *rule)
{
	ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
	ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);

	ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
	ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);

	rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
	rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
}

static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
					struct hclge_fd_rule *rule)
{
	switch (info->layer) {
	case HCLGE_FD_USER_DEF_L2:
		rule->tuples.l2_user_def = info->data;
		rule->tuples_mask.l2_user_def = info->data_mask;
		break;
	case HCLGE_FD_USER_DEF_L3:
		rule->tuples.l3_user_def = info->data;
		rule->tuples_mask.l3_user_def = info->data_mask;
		break;
	case HCLGE_FD_USER_DEF_L4:
		rule->tuples.l4_user_def = (u32)info->data << 16;
		rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
		break;
	default:
		break;
	}

	rule->ep.user_def = *info;
}

static int hclge_fd_get_tuple(struct ethtool_rx_flow_spec *fs,
			      struct hclge_fd_rule *rule,
			      struct hclge_fd_user_def_info *info)
{
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);

	switch (flow_type) {
	case SCTP_V4_FLOW:
		hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_SCTP);
		break;
	case TCP_V4_FLOW:
		hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_TCP);
		break;
	case UDP_V4_FLOW:
		hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_UDP);
		break;
	case IP_USER_FLOW:
		hclge_fd_get_ip4_tuple(fs, rule);
		break;
	case SCTP_V6_FLOW:
		hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_SCTP);
		break;
	case TCP_V6_FLOW:
		hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_TCP);
		break;
	case UDP_V6_FLOW:
		hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_UDP);
		break;
	case IPV6_USER_FLOW:
		hclge_fd_get_ip6_tuple(fs, rule);
		break;
	case ETHER_FLOW:
		hclge_fd_get_ether_tuple(fs, rule);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (fs->flow_type & FLOW_EXT) {
		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
		hclge_fd_get_user_def_tuple(info, rule);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
	}

	return 0;
}

static int hclge_fd_config_rule(struct hclge_dev *hdev,
				struct hclge_fd_rule *rule)
{
	int ret;

	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		return ret;

	return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
}

static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
				     struct hclge_fd_rule *rule)
{
	int ret;

	spin_lock_bh(&hdev->fd_rule_lock);

	if (hdev->fd_active_type != rule->rule_type &&
	    (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
	     hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
		dev_err(&hdev->pdev->dev,
			"mode conflict(new type %d, active type %d), please delete existent rules first\n",
			rule->rule_type, hdev->fd_active_type);
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EINVAL;
	}

	ret = hclge_fd_check_user_def_refcnt(hdev, rule);
	if (ret)
		goto out;

	ret = hclge_clear_arfs_rules(hdev);
	if (ret)
		goto out;

	ret = hclge_fd_config_rule(hdev, rule);
	if (ret)
		goto out;

	rule->state = HCLGE_FD_ACTIVE;
	hdev->fd_active_type = rule->rule_type;
	hclge_update_fd_list(hdev, rule->state, rule->location, rule);

out:
	spin_unlock_bh(&hdev->fd_rule_lock);
	return ret;
}

static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
}

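/* Decode the ethtool ring_cookie: RX_CLS_FLOW_DISC means drop the packet;
 * otherwise the cookie packs a queue id in its low bits and a 1-based vf id
 * in the ETHTOOL_RX_FLOW_SPEC_RING_VF field (0 selects the PF itself).
 */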
static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
				      u16 *vport_id, u8 *action, u16 *queue_id)
{
	struct hclge_vport *vport = hdev->vport;

	if (ring_cookie == RX_CLS_FLOW_DISC) {
		*action = HCLGE_FD_ACTION_DROP_PACKET;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		u16 tqps;

		/* To keep consistent with user's configuration, minus 1 when
		 * printing 'vf', because vf id from ethtool is added 1 for vf.
		 */
		if (vf > hdev->num_req_vfs) {
			dev_err(&hdev->pdev->dev,
				"Error: vf id (%u) should be less than %u\n",
				vf - 1U, hdev->num_req_vfs);
			return -EINVAL;
		}

		*vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
		tqps = hdev->vport[vf].nic.kinfo.num_tqps;

		if (ring >= tqps) {
			dev_err(&hdev->pdev->dev,
				"Error: queue id (%u) > max tqp num (%u)\n",
				ring, tqps - 1U);
			return -EINVAL;
		}

		*action = HCLGE_FD_ACTION_SELECT_QUEUE;
		*queue_id = ring;
	}

	return 0;
}

static int hclge_add_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_user_def_info info;
	u16 dst_vport_id = 0, q_index = 0;
	struct ethtool_rx_flow_spec *fs;
	struct hclge_fd_rule *rule;
	u32 unused = 0;
	u8 action;
	int ret;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
		dev_err(&hdev->pdev->dev,
			"flow table director is not supported\n");
		return -EOPNOTSUPP;
	}

	if (!hdev->fd_en) {
		dev_err(&hdev->pdev->dev,
			"please enable flow director first\n");
		return -EOPNOTSUPP;
	}

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
	if (ret)
		return ret;

	ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
					 &action, &q_index);
	if (ret)
		return ret;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_fd_get_tuple(fs, rule, &info);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->flow_type = fs->flow_type;
	rule->location = fs->location;
	rule->unused_tuple = unused;
	rule->vf_id = dst_vport_id;
	rule->queue_id = q_index;
	rule->action = action;
	rule->rule_type = HCLGE_FD_EP_ACTIVE;

	ret = hclge_add_fd_entry_common(hdev, rule);
	if (ret)
		kfree(rule);

	return ret;
}

static int hclge_del_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	int ret;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
	    !test_bit(fs->location, hdev->fd_bmap)) {
		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %u is inexistent\n", fs->location);
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -ENOENT;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
				   NULL, false);
	if (ret)
		goto out;

	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);

out:
	spin_unlock_bh(&hdev->fd_rule_lock);
	return ret;
}

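/* Remove every rule from the hardware TCAM; the software rule list, rule
 * counter and location bitmap are wiped as well only when clear_list is
 * true.
 */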
static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
					 bool clear_list)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	u16 location;

	spin_lock_bh(&hdev->fd_rule_lock);

	for_each_set_bit(location, hdev->fd_bmap,
			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
				     NULL, false);

	if (clear_list) {
		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
					  rule_node) {
			hlist_del(&rule->rule_node);
			kfree(rule);
		}
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
		hdev->hclge_fd_rule_num = 0;
		bitmap_zero(hdev->fd_bmap,
			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
	}

	spin_unlock_bh(&hdev->fd_rule_lock);
}

static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
{
	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return;

	hclge_clear_fd_rules_in_list(hdev, true);
	hclge_fd_disable_user_def(hdev);
}

static int hclge_restore_fd_entries(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	/* Return ok here, because reset error handling will check this
	 * return value. If error is returned here, the reset process will
	 * fail.
	 */
	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return 0;

	/* if fd is disabled, should not restore it when reset */
	if (!hdev->fd_en)
		return 0;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rule->state == HCLGE_FD_ACTIVE)
			rule->state = HCLGE_FD_TO_ADD;
	}
	spin_unlock_bh(&hdev->fd_rule_lock);
	set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);

	return 0;
}

static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle))
		return -EOPNOTSUPP;

	cmd->rule_cnt = hdev->hclge_fd_rule_num;
	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	return 0;
}

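/* The hclge_fd_get_*_info() helpers below do the reverse of the tuple
 * extractors above: they fill an ethtool spec/mask pair from a stored rule,
 * reporting an all-zero mask for any tuple marked unused.
 */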
static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
				     struct ethtool_tcpip4_spec *spec,
				     struct ethtool_tcpip4_spec *spec_mask)
{
	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

	spec->psrc = cpu_to_be16(rule->tuples.src_port);
	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.src_port);

	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.dst_port);

	spec->tos = rule->tuples.ip_tos;
	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
			0 : rule->tuples_mask.ip_tos;
}

static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
				  struct ethtool_usrip4_spec *spec,
				  struct ethtool_usrip4_spec *spec_mask)
{
	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

	spec->tos = rule->tuples.ip_tos;
	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
			0 : rule->tuples_mask.ip_tos;

	spec->proto = rule->tuples.ip_proto;
	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
			0 : rule->tuples_mask.ip_proto;

	spec->ip_ver = ETH_RX_NFC_IP4;
}

static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
				     struct ethtool_tcpip6_spec *spec,
				     struct ethtool_tcpip6_spec *spec_mask)
{
	cpu_to_be32_array(spec->ip6src,
			  rule->tuples.src_ip, IPV6_SIZE);
	cpu_to_be32_array(spec->ip6dst,
			  rule->tuples.dst_ip, IPV6_SIZE);
	if (rule->unused_tuple & BIT(INNER_SRC_IP))
		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
	else
		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
				  IPV6_SIZE);

	if (rule->unused_tuple & BIT(INNER_DST_IP))
		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
	else
		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
				  IPV6_SIZE);

	spec->tclass = rule->tuples.ip_tos;
	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
			0 : rule->tuples_mask.ip_tos;

	spec->psrc = cpu_to_be16(rule->tuples.src_port);
	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.src_port);

	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.dst_port);
}

static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
				  struct ethtool_usrip6_spec *spec,
				  struct ethtool_usrip6_spec *spec_mask)
{
	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
	if (rule->unused_tuple & BIT(INNER_SRC_IP))
		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
	else
		cpu_to_be32_array(spec_mask->ip6src,
				  rule->tuples_mask.src_ip, IPV6_SIZE);

	if (rule->unused_tuple & BIT(INNER_DST_IP))
		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
	else
		cpu_to_be32_array(spec_mask->ip6dst,
				  rule->tuples_mask.dst_ip, IPV6_SIZE);

	spec->tclass = rule->tuples.ip_tos;
	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
			0 : rule->tuples_mask.ip_tos;

	spec->l4_proto = rule->tuples.ip_proto;
	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
			0 : rule->tuples_mask.ip_proto;
}

static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
				    struct ethhdr *spec,
				    struct ethhdr *spec_mask)
{
	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);

	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
		eth_zero_addr(spec_mask->h_source);
	else
		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);

	if (rule->unused_tuple & BIT(INNER_DST_MAC))
		eth_zero_addr(spec_mask->h_dest);
	else
		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);

	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
}

static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
				       struct hclge_fd_rule *rule)
{
	if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
	    HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
		fs->h_ext.data[0] = 0;
		fs->h_ext.data[1] = 0;
		fs->m_ext.data[0] = 0;
		fs->m_ext.data[1] = 0;
	} else {
		fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
		fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
		fs->m_ext.data[0] =
				cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
		fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
	}
}

static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
				  struct hclge_fd_rule *rule)
{
	if (fs->flow_type & FLOW_EXT) {
		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
		fs->m_ext.vlan_tci =
				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
				0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);

		hclge_fd_get_user_def_info(fs, rule);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_dest);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_dest,
					rule->tuples_mask.dst_mac);
	}
}

static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev,
					       u16 location)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location == location)
			return rule;
		else if (rule->location > location)
			return NULL;
	}

	return NULL;
}

static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs,
				     struct hclge_fd_rule *rule)
{
	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else {
		u64 vf_id;

		fs->ring_cookie = rule->queue_id;
		vf_id = rule->vf_id;
		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		fs->ring_cookie |= vf_id;
	}
}

static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
				  struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule *rule = NULL;
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	spin_lock_bh(&hdev->fd_rule_lock);

	rule = hclge_get_fd_rule(hdev, fs->location);
	if (!rule) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -ENOENT;
	}

	fs->flow_type = rule->flow_type;
	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
					 &fs->m_u.tcp_ip4_spec);
		break;
	case IP_USER_FLOW:
		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
				      &fs->m_u.usr_ip4_spec);
		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
					 &fs->m_u.tcp_ip6_spec);
		break;
	case IPV6_USER_FLOW:
		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
				      &fs->m_u.usr_ip6_spec);
		break;
	/* The flow type of fd rule has been checked before adding in to rule
	 * list. As other flow types have been handled, it must be ETHER_FLOW
	 * for the default case
	 */
	default:
		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
					&fs->m_u.ether_spec);
		break;
	}

	hclge_fd_get_ext_info(fs, rule);

	hclge_fd_get_ring_cookie(fs, rule);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}

static int hclge_get_all_rules(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node2;
	int cnt = 0;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return -EOPNOTSUPP;

	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (cnt == cmd->rule_cnt) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -EMSGSIZE;
		}

		if (rule->state == HCLGE_FD_TO_DEL)
			continue;

		rule_locs[cnt] = rule->location;
		cnt++;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	cmd->rule_cnt = cnt;

	return 0;
}

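/* aRFS support: hash-dissected flow keys are converted into fd rule tuples
 * so that received flows can be steered to the queue of the consuming CPU.
 */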
static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
				     struct hclge_fd_rule_tuples *tuples)
{
#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32

	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
	tuples->ip_proto = fkeys->basic.ip_proto;
	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);

	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
	} else {
		int i;

		for (i = 0; i < IPV6_SIZE; i++) {
			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
		}
	}
}

/* traverse all rules, check whether an existed rule has the same tuples */
static struct hclge_fd_rule *
hclge_fd_search_flow_keys(struct hclge_dev *hdev,
			  const struct hclge_fd_rule_tuples *tuples)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
			return rule;
	}

	return NULL;
}

static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
				     struct hclge_fd_rule *rule)
{
	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
			     BIT(INNER_SRC_PORT);
	rule->action = 0;
	rule->vf_id = 0;
	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
	rule->state = HCLGE_FD_TO_ADD;
	if (tuples->ether_proto == ETH_P_IP) {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V4_FLOW;
		else
			rule->flow_type = UDP_V4_FLOW;
	} else {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V6_FLOW;
		else
			rule->flow_type = UDP_V6_FLOW;
	}
	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
}

static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
				      u16 flow_id, struct flow_keys *fkeys)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule_tuples new_tuples = {};
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	u16 bit_id;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return -EOPNOTSUPP;

	/* when there is already fd rule existed add by user,
	 * arfs should not work
	 */
	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EOPNOTSUPP;
	}

	hclge_fd_get_flow_tuples(fkeys, &new_tuples);

	/* check is there flow director filter existed for this flow,
	 * if not, create a new filter for it;
	 * if filter exist with different queue id, modify the filter;
	 * if filter exist with same queue id, do nothing
	 */
	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
	if (!rule) {
		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -ENOSPC;
		}

		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
		if (!rule) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -ENOMEM;
		}

		rule->location = bit_id;
		rule->arfs.flow_id = flow_id;
		rule->queue_id = queue_id;
		hclge_fd_build_arfs_rule(&new_tuples, rule);
		hclge_update_fd_list(hdev, rule->state, rule->location, rule);
		hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
	} else if (rule->queue_id != queue_id) {
		rule->queue_id = queue_id;
		rule->state = HCLGE_FD_TO_ADD;
		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
		hclge_task_schedule(hdev, 0);
	}
	spin_unlock_bh(&hdev->fd_rule_lock);
	return rule->location;
}

static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return;
	}
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rule->state != HCLGE_FD_ACTIVE)
			continue;
		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
					rule->arfs.flow_id, rule->location)) {
			rule->state = HCLGE_FD_TO_DEL;
			set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
		}
	}
	spin_unlock_bh(&hdev->fd_rule_lock);
#endif
}

/* make sure being called after lock up with fd_rule_lock */
static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int ret;

	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
		return 0;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		switch (rule->state) {
		case HCLGE_FD_TO_DEL:
		case HCLGE_FD_ACTIVE:
			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
						   rule->location, NULL, false);
			if (ret)
				return ret;
			fallthrough;
		case HCLGE_FD_TO_ADD:
			hclge_fd_dec_rule_cnt(hdev, rule->location);
			hlist_del(&rule->rule_node);
			kfree(rule);
			break;
		default:
			break;
		}
	}
	hclge_sync_fd_state(hdev);

#endif
	return 0;
}

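/* tc flower offload: the hclge_get_cls_key_*() helpers below copy matched
 * dissector keys into fd rule tuples and mark anything unmatched as an
 * unused tuple.
 */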
static void hclge_get_cls_key_basic(const struct flow_rule *flow,
				    struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;
		u16 ethtype_key, ethtype_mask;

		flow_rule_match_basic(flow, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);

		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}
		rule->tuples.ether_proto = ethtype_key;
		rule->tuples_mask.ether_proto = ethtype_mask;
		rule->tuples.ip_proto = match.key->ip_proto;
		rule->tuples_mask.ip_proto = match.mask->ip_proto;
	} else {
		rule->unused_tuple |= BIT(INNER_IP_PROTO);
		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
	}
}

static void hclge_get_cls_key_mac(const struct flow_rule *flow,
				  struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(flow, &match);
		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
		ether_addr_copy(rule->tuples.src_mac, match.key->src);
		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
	} else {
		rule->unused_tuple |= BIT(INNER_DST_MAC);
		rule->unused_tuple |= BIT(INNER_SRC_MAC);
	}
}

static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
				   struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(flow, &match);
		rule->tuples.vlan_tag1 = match.key->vlan_id |
				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
	} else {
		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
	}
}

static void hclge_get_cls_key_ip(const struct flow_rule *flow,
				 struct hclge_fd_rule *rule)
{
	u16 addr_type = 0;

	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(flow, &match);
		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(flow, &match);
		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
						be32_to_cpu(match.mask->src);
		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
						be32_to_cpu(match.mask->dst);
	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(flow, &match);
		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
				  IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  match.mask->src.s6_addr32, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
				  IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  match.mask->dst.s6_addr32, IPV6_SIZE);
	} else {
		rule->unused_tuple |= BIT(INNER_SRC_IP);
		rule->unused_tuple |= BIT(INNER_DST_IP);
	}
}

static void hclge_get_cls_key_port(const struct flow_rule *flow,
				   struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(flow, &match);

		rule->tuples.src_port = be16_to_cpu(match.key->src);
		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
	} else {
		rule->unused_tuple |= BIT(INNER_SRC_PORT);
		rule->unused_tuple |= BIT(INNER_DST_PORT);
	}
}

static int hclge_parse_cls_flower(struct hclge_dev *hdev,
				  struct flow_cls_offload *cls_flower,
				  struct hclge_fd_rule *rule)
{
	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
	struct flow_dissector *dissector = flow->match.dissector;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) {
		dev_err(&hdev->pdev->dev, "unsupported key set: %#llx\n",
			dissector->used_keys);
		return -EOPNOTSUPP;
	}

	hclge_get_cls_key_basic(flow, rule);
	hclge_get_cls_key_mac(flow, rule);
	hclge_get_cls_key_vlan(flow, rule);
	hclge_get_cls_key_ip(flow, rule);
	hclge_get_cls_key_port(flow, rule);

	return 0;
}

static int hclge_check_cls_flower(struct hclge_dev *hdev,
				  struct flow_cls_offload *cls_flower, int tc)
{
	u32 prio = cls_flower->common.prio;

	if (tc < 0 || tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
		return -EINVAL;
	}

	if (prio == 0 ||
	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
		dev_err(&hdev->pdev->dev,
			"prio %u should be in range[1, %u]\n",
			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
		return -EINVAL;
	}

	if (test_bit(prio - 1, hdev->fd_bmap)) {
		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
		return -EINVAL;
	}

	return 0;
}

static int hclge_add_cls_flower(struct hnae3_handle *handle,
				struct flow_cls_offload *cls_flower,
				int tc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	int ret;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
		dev_err(&hdev->pdev->dev,
			"cls flower is not supported\n");
		return -EOPNOTSUPP;
	}

	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to check cls flower params, ret = %d\n", ret);
		return ret;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->action = HCLGE_FD_ACTION_SELECT_TC;
	rule->cls_flower.tc = tc;
	rule->location = cls_flower->common.prio - 1;
	rule->vf_id = 0;
	rule->cls_flower.cookie = cls_flower->cookie;
	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;

	ret = hclge_add_fd_entry_common(hdev, rule);
	if (ret)
		kfree(rule);

	return ret;
}

static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
						   unsigned long cookie)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rule->cls_flower.cookie == cookie)
			return rule;
	}

	return NULL;
}

static int hclge_del_cls_flower(struct hnae3_handle *handle,
				struct flow_cls_offload *cls_flower)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	int ret;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return -EOPNOTSUPP;

	spin_lock_bh(&hdev->fd_rule_lock);

	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
	if (!rule) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EINVAL;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
				   NULL, false);
	if (ret) {
		/* if tcam config fail, set rule state to TO_DEL,
		 * so the rule will be deleted when periodic
		 * task being scheduled.
		 */
		hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL);
		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
		spin_unlock_bh(&hdev->fd_rule_lock);
		return ret;
	}

	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}

static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int ret = 0;

	if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
		return;

	spin_lock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
		switch (rule->state) {
		case HCLGE_FD_TO_ADD:
			ret = hclge_fd_config_rule(hdev, rule);
			if (ret)
				goto out;
			rule->state = HCLGE_FD_ACTIVE;
			break;
		case HCLGE_FD_TO_DEL:
			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
						   rule->location, NULL, false);
			if (ret)
				goto out;
			hclge_fd_dec_rule_cnt(hdev, rule->location);
			hclge_fd_free_node(hdev, rule);
			break;
		default:
			break;
		}
	}

out:
	if (ret)
		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);

	spin_unlock_bh(&hdev->fd_rule_lock);
}

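/* Periodic service task entry for flow director: flush a pending clear-all
 * request, sync the user-def configuration, then replay deferred rule
 * add/delete requests.
 */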
static void hclge_sync_fd_table(struct hclge_dev *hdev)
{
	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return;

	if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
		bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;

		hclge_clear_fd_rules_in_list(hdev, clear_list);
	}

	hclge_sync_fd_user_def_cfg(hdev, false);

	hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
}

static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}

static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
}

static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rst_stats.hw_reset_done_cnt;
}

static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hdev->fd_en = enable;

	if (!enable)
		set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
	else
		hclge_restore_fd_entries(handle);

	hclge_task_schedule(hdev, 0);
}

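/* Enable or disable the MAC. When disabling, wait for the link to report
 * down so that later configuration does not race with in-flight traffic.
 */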
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
#define HCLGE_LINK_STATUS_WAIT_CNT  3

	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);

	if (enable) {
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
	}

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
		return;
	}

	if (!enable)
		hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN,
					   HCLGE_LINK_STATUS_WAIT_CNT);
}

static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
				     u8 switch_param, u8 param_mask)
{
	struct hclge_mac_vlan_switch_cmd *req;
	struct hclge_desc desc;
	u32 func_id;
	int ret;

	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;

	/* read current config parameter */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
				   true);
	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
	req->func_id = cpu_to_le32(func_id);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"read mac vlan switch parameter fail, ret = %d\n", ret);
		return ret;
	}

	/* modify and write new config parameter */
	hclge_comm_cmd_reuse_desc(&desc, false);
	req->switch_param = (req->switch_param & param_mask) | switch_param;
	req->param_mask = param_mask;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"set mac vlan switch parameter fail, ret = %d\n", ret);
	return ret;
}

static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
				       int link_ret)
{
#define HCLGE_PHY_LINK_STATUS_NUM  200

	struct phy_device *phydev = hdev->hw.mac.phydev;
	int i = 0;
	int ret;

	do {
		ret = phy_read_status(phydev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"phy update link status fail, ret = %d\n", ret);
			return;
		}

		if (phydev->link == link_ret)
			break;

		msleep(HCLGE_LINK_STATUS_MS);
	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
}

static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
				      int wait_cnt)
{
	int link_status;
	int i = 0;
	int ret;

	do {
		ret = hclge_get_mac_link_status(hdev, &link_status);
		if (ret)
			return ret;
		if (link_status == link_ret)
			return 0;

		msleep(HCLGE_LINK_STATUS_MS);
	} while (++i < wait_cnt);
	return -EBUSY;
}

static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
					  bool is_phy)
{
#define HCLGE_MAC_LINK_STATUS_NUM  100

	int link_ret;

	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;

	if (is_phy)
		hclge_phy_link_status_wait(hdev, link_ret);

	return hclge_mac_link_status_wait(hdev, link_ret,
					  HCLGE_MAC_LINK_STATUS_NUM);
}

static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config at first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret =%d.\n", ret);
		return ret;
	}

	/* 2 Then setup the loopback flag */
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	/* 3 Config mac work mode with loopback flag
	 * and its original configure parameters
	 */
	hclge_comm_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret =%d.\n", ret);
	return ret;
}

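/* Common (serdes/PHY-internal) loopback is configured in two steps: send
 * the mode command, then poll the completion flag reported by firmware.
 */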
static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en,
					      enum hnae3_loop loop_mode)
{
	struct hclge_common_lb_cmd *req;
	struct hclge_desc desc;
	u8 loop_mode_b;
	int ret;

	req = (struct hclge_common_lb_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);

	switch (loop_mode) {
	case HNAE3_LOOP_SERIAL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PARALLEL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PHY:
		loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported loopback mode %d\n", loop_mode);
		return -ENOTSUPP;
	}

	req->mask = loop_mode_b;
	if (en)
		req->enable = loop_mode_b;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to send loopback cmd, loop_mode = %d, ret = %d\n",
			loop_mode, ret);

	return ret;
}

static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev)
{
#define HCLGE_COMMON_LB_RETRY_MS	10
#define HCLGE_COMMON_LB_RETRY_NUM	100

	struct hclge_common_lb_cmd *req;
	struct hclge_desc desc;
	u32 i = 0;
	int ret;

	req = (struct hclge_common_lb_cmd *)desc.data;

	do {
		msleep(HCLGE_COMMON_LB_RETRY_MS);
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to get loopback done status, ret = %d\n",
				ret);
			return ret;
		}
	} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
		 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));

	if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
		dev_err(&hdev->pdev->dev, "wait loopback timeout\n");
		return -EBUSY;
	} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
		dev_err(&hdev->pdev->dev, "failed to do loopback test\n");
		return -EIO;
	}

	return 0;
}

static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
	int ret;

	ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode);
	if (ret)
		return ret;

	return hclge_cfg_common_loopback_wait(hdev);
}

static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
	int ret;

	ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
	if (ret)
		return ret;

	hclge_cfg_mac_mode(hdev, en);

	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"serdes loopback config mac mode timeout\n");

	return ret;
}

static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
				     struct phy_device *phydev)
{
	int ret;

	if (!phydev->suspended) {
		ret = phy_suspend(phydev);
		if (ret)
			return ret;
	}

	ret = phy_resume(phydev);
	if (ret)
		return ret;

	return phy_loopback(phydev, true);
}

static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
				      struct phy_device *phydev)
{
	int ret;

	ret = phy_loopback(phydev, false);
	if (ret)
		return ret;

	return phy_suspend(phydev);
}

static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int ret;

	if (!phydev) {
		if (hnae3_dev_phy_imp_supported(hdev))
			return hclge_set_common_loopback(hdev, en,
							 HNAE3_LOOP_PHY);
		return -ENOTSUPP;
	}

	if (en)
		ret = hclge_enable_phy_loopback(hdev, phydev);
	else
		ret = hclge_disable_phy_loopback(hdev, phydev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"set phy loopback fail, ret = %d\n", ret);
		return ret;
	}

	hclge_cfg_mac_mode(hdev, en);

	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"phy loopback config mac mode timeout\n");

	return ret;
}

static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
				     u16 stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGE_TQP_ENABLE_B;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;
	u16 i;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
		if (ret)
			return ret;
	}
	return 0;
}

static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret = 0;

	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
	 * the same, the packets are looped back in the SSU. If SSU loopback
	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
	 */
	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);

		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
						HCLGE_SWITCH_ALW_LPBK_MASK);
		if (ret)
			return ret;
	}

	switch (loop_mode) {
	case HNAE3_LOOP_APP:
		ret = hclge_set_app_loopback(hdev, en);
		break;
	case HNAE3_LOOP_SERIAL_SERDES:
	case HNAE3_LOOP_PARALLEL_SERDES:
		ret = hclge_set_common_loopback(hdev, en, loop_mode);
		break;
	case HNAE3_LOOP_PHY:
		ret = hclge_set_phy_loopback(hdev, en);
		break;
	case HNAE3_LOOP_EXTERNAL:
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	if (ret)
		return ret;

	ret = hclge_tqp_enable(handle, en);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
			en ? "enable" : "disable", ret);

	return ret;
}

static int hclge_set_default_loopback(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_set_app_loopback(hdev, false);
	if (ret)
		return ret;

	ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
	if (ret)
		return ret;

	return hclge_cfg_common_loopback(hdev, false,
					 HNAE3_LOOP_PARALLEL_SERDES);
}

static void hclge_flush_link_update(struct hclge_dev *hdev)
{
#define HCLGE_FLUSH_LINK_TIMEOUT	100000

	unsigned long last = hdev->serv_processed_cnt;
	int i = 0;

	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
	       last == hdev->serv_processed_cnt)
		usleep_range(1, 1);
}

static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (enable) {
		hclge_task_schedule(hdev, 0);
	} else {
		/* Set the DOWN flag here to disable link updating */
		set_bit(HCLGE_STATE_DOWN, &hdev->state);

		/* flush memory to make sure DOWN is seen by service task */
		smp_mb__before_atomic();
		hclge_flush_link_update(hdev);
	}
}

static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_comm_reset_tqp_stats(handle);

	hclge_mac_start_phy(hdev);

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	spin_lock_bh(&hdev->fd_rule_lock);
	hclge_clear_arfs_rules(hdev);
	spin_unlock_bh(&hdev->fd_rule_lock);

	/* If it is not PF reset or FLR, the firmware will disable the MAC,
	 * so it only need to stop phy here.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
		hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
				       HCLGE_PFC_DISABLE);
		if (hdev->reset_type != HNAE3_FUNC_RESET &&
		    hdev->reset_type != HNAE3_FLR_RESET) {
			hclge_mac_stop_phy(hdev);
			hclge_update_link_status(hdev);
			return;
		}
	}

	hclge_reset_tqp(handle);

	hclge_config_mac_tnl_int(hdev, false);

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_comm_reset_tqp_stats(handle);
	hclge_update_link_status(hdev);
}

int hclge_vport_start(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	set_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
	vport->last_active_jiffies = jiffies;
	vport->need_notify = 0;

	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
		if (vport->vport_id) {
			hclge_restore_mac_table_common(vport);
			hclge_restore_vport_vlan_table(vport);
		} else {
			hclge_restore_hw_table(hdev);
		}
	}

	clear_bit(vport->vport_id, hdev->vport_config_block);

	return 0;
}

void hclge_vport_stop(struct hclge_vport *vport)
{
	clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->need_notify = 0;
}

static int hclge_client_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_vport_start(vport);
}

static void hclge_client_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	hclge_vport_stop(vport);
}

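/* Translate a MAC/VLAN table command response into an errno. As the debug
 * messages below show, resp_code 1 means "entry not found" for remove and
 * lookup, while for add both 0 and 1 are treated as success.
 */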
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if (!resp_code || resp_code == 1)
			return 0;
		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
			 resp_code == HCLGE_ADD_MC_OVERFLOW)
			return -ENOSPC;

		dev_err(&hdev->pdev->dev,
			"add mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return 0;
		} else if (resp_code == 1) {
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
			return -ENOENT;
		}

		dev_err(&hdev->pdev->dev,
			"remove mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return 0;
		} else if (resp_code == 1) {
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
			return -ENOENT;
		}

		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	}

	dev_err(&hdev->pdev->dev,
		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);

	return -EINVAL;
}

static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
#define HCLGE_VF_NUM_IN_FIRST_DESC 192

	unsigned int word_num;
	unsigned int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}

static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}

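/* Pack a MAC address into the table entry layout: bytes 0~3 form the high
 * 32-bit word (byte 3 in the most significant position) and bytes 4~5 the
 * low 16-bit word.
 */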
static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}

static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}

static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}

static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_comm_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
		hclge_comm_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
		hclge_comm_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size)
{
	struct hclge_umv_spc_alc_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);

	req->space_size = cpu_to_le32(space_size);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
			ret);
		return ret;
	}

	*allocated_size = le32_to_cpu(desc.data[1]);

	return 0;
}

static int hclge_init_umv_space(struct hclge_dev *hdev)
{
	u16 allocated_size = 0;
	int ret;

	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
	if (ret)
		return ret;

	if (allocated_size < hdev->wanted_umv_size)
		dev_warn(&hdev->pdev->dev,
			 "failed to alloc umv space, want %u, get %u\n",
			 hdev->wanted_umv_size, allocated_size);

	hdev->max_umv_size = allocated_size;
	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_alloc_vport + 1);

	if (hdev->ae_dev->dev_specs.mc_mac_size)
		set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);

	return 0;
}
static void hclge_reset_umv_space(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->used_umv_num = 0;
	}

	mutex_lock(&hdev->vport_lock);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
	mutex_unlock(&hdev->vport_lock);

	hdev->used_mc_mac_num = 0;
}

static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
{
	struct hclge_dev *hdev = vport->back;
	bool is_full;

	if (need_lock)
		mutex_lock(&hdev->vport_lock);

	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
		   hdev->share_umv_size == 0);

	if (need_lock)
		mutex_unlock(&hdev->vport_lock);

	return is_full;
}

static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
	struct hclge_dev *hdev = vport->back;

	if (is_free) {
		if (vport->used_umv_num > hdev->priv_umv_size)
			hdev->share_umv_size++;

		if (vport->used_umv_num > 0)
			vport->used_umv_num--;
	} else {
		if (vport->used_umv_num >= hdev->priv_umv_size &&
		    hdev->share_umv_size > 0)
			hdev->share_umv_size--;
		vport->used_umv_num++;
	}
}
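/* Software MAC address nodes move through a small state machine:
 * TO_ADD (queued for hardware add), ACTIVE (present in hardware) and
 * TO_DEL (queued for hardware delete). The two helpers below look up a
 * node by address and merge a newly requested state into an existing
 * node.
 */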
static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
						  const u8 *mac_addr)
{
	struct hclge_mac_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}

static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
				  enum HCLGE_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGE_MAC_TO_ADD:
		if (mac_node->state == HCLGE_MAC_TO_DEL)
			mac_node->state = HCLGE_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGE_MAC_TO_DEL:
		if (mac_node->state == HCLGE_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGE_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * ACTIVE
	 */
	case HCLGE_MAC_ACTIVE:
		if (mac_node->state == HCLGE_MAC_TO_ADD)
			mac_node->state = HCLGE_MAC_ACTIVE;
		break;
	}
}
int hclge_update_mac_list(struct hclge_vport *vport,
			  enum HCLGE_MAC_NODE_STATE state,
			  enum HCLGE_MAC_ADDR_TYPE mac_type,
			  const unsigned char *addr)
{
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing.
	 */
	mac_node = hclge_find_mac_node(list, addr);
	if (mac_node) {
		hclge_update_mac_node(mac_node, state);
		spin_unlock_bh(&vport->mac_list_lock);
		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
		return 0;
	}

	/* if this address is never added, unnecessary to delete */
	if (state == HCLGE_MAC_TO_DEL) {
		spin_unlock_bh(&vport->mac_list_lock);
		hnae3_format_mac_addr(format_mac_addr, addr);
		dev_err(&hdev->pdev->dev,
			"failed to delete address %s from mac list\n",
			format_mac_addr);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&vport->mac_list_lock);
		return -ENOMEM;
	}

	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&vport->mac_list_lock);

	return 0;
}
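/* The add/remove address ops below only edit the per-vport address
 * lists; the actual hardware table writes are deferred to the periodic
 * service task (hclge_sync_mac_table()), keeping the ndo paths
 * non-blocking.
 */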
static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
				     addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		hnae3_format_mac_addr(format_mac_addr, addr);
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n",
			format_mac_addr, is_zero_ether_addr(addr),
			is_broadcast_ether_addr(addr),
			is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr, false);

	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry is nonexistent. A repeated unicast entry
	 * is not allowed in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT) {
		mutex_lock(&hdev->vport_lock);
		if (!hclge_is_umv_space_full(vport, false)) {
			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
			if (!ret)
				hclge_update_umv_space(vport, false);
			mutex_unlock(&hdev->vport_lock);
			return ret;
		}
		mutex_unlock(&hdev->vport_lock);

		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
				hdev->priv_umv_size);

		return -ENOSPC;
	}

	/* check if we just hit the duplicate */
	if (!ret)
		return -EEXIST;

	return ret;
}
static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
				     addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		hnae3_format_mac_addr(format_mac_addr, addr);
		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n",
			format_mac_addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, false);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);
	if (!ret || ret == -ENOENT) {
		mutex_lock(&hdev->vport_lock);
		hclge_update_umv_space(vport, true);
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	return ret;
}
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
				     addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	bool is_new_addr = false;
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		hnae3_format_mac_addr(format_mac_addr, addr);
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%s.\n",
			format_mac_addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (status) {
		if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
		    hdev->used_mc_mac_num >=
		    hdev->ae_dev->dev_specs.mc_mac_size)
			goto err_no_space;

		is_new_addr = true;

		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
	}
	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
	if (status)
		return status;
	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	if (status == -ENOSPC)
		goto err_no_space;
	else if (!status && is_new_addr)
		hdev->used_mc_mac_num++;

	return status;

err_no_space:
	/* if already overflow, not to print each time */
	if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) {
		vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
	}

	return -ENOSPC;
}
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
				     addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_comm_cmd_status status;
	struct hclge_desc desc[3];

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		hnae3_format_mac_addr(format_mac_addr, addr);
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%s.\n",
			format_mac_addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
		if (status)
			return status;

		if (hclge_is_all_function_id_zero(desc)) {
			/* All the vfid is zero, so need to delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
			if (!status)
				hdev->used_mc_mac_num--;
		} else {
			/* Not all the vfid is zero, update the vfid */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
		}
	} else if (status == -ENOENT) {
		status = 0;
	}

	return status;
}
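/* The sync/unsync walkers below flush a temporary add/del list into
 * hardware. A failed entry re-arms the MAC_TBL_CHANGE flag so the
 * service task retries later, and stops the walk except for the
 * expected -EEXIST on unicast and -ENOSPC on multicast reuse.
 */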
static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
				      struct list_head *list,
				      enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	int (*sync)(struct hclge_vport *vport, const unsigned char *addr);
	struct hclge_mac_node *mac_node, *tmp;
	int ret;

	if (mac_type == HCLGE_MAC_ADDR_UC)
		sync = hclge_add_uc_addr_common;
	else
		sync = hclge_add_mc_addr_common;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = sync(vport, mac_node->mac_addr);
		if (!ret) {
			mac_node->state = HCLGE_MAC_ACTIVE;
		} else {
			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
				&vport->state);

			/* If one unicast mac address is existing in hardware,
			 * we need to try whether other unicast mac addresses
			 * are new addresses that can be added.
			 * Multicast mac address can be reusable, even though
			 * there is no space to add new multicast mac address,
			 * we should check whether other mac addresses are
			 * existing in hardware for reuse.
			 */
			if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) ||
			    (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC))
				break;
		}
	}
}

static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
					struct list_head *list,
					enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
	struct hclge_mac_node *mac_node, *tmp;
	int ret;

	if (mac_type == HCLGE_MAC_ADDR_UC)
		unsync = hclge_rm_uc_addr_common;
	else
		unsync = hclge_rm_mc_addr_common;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = unsync(vport, mac_node->mac_addr);
		if (!ret || ret == -ENOENT) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
				&vport->state);
			break;
		}
	}
}
static bool hclge_sync_from_add_list(struct list_head *add_list,
				     struct list_head *mac_list)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;
	bool all_added = true;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		if (mac_node->state == HCLGE_MAC_TO_ADD)
			all_added = false;

		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means a TO_DEL request was received
		 * during the time window of adding the mac address into mac
		 * table. if mac_node state is ACTIVE, then change it to TO_DEL,
		 * then it will be removed at next time. else it must be TO_ADD,
		 * this address hasn't been added into mac table,
		 * so just remove the mac node.
		 */
		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclge_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
			mac_node->state = HCLGE_MAC_TO_DEL;
			list_move_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}

	return all_added;
}

static void hclge_sync_from_del_list(struct list_head *del_list,
				     struct list_head *mac_list)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, it means
			 * received a new TO_ADD request during the time window
			 * of configuring the mac address. The mac node state
			 * is TO_ADD, and the address is already in the
			 * hardware (due to delete fail), so we just need
			 * to change the mac node state to ACTIVE.
			 */
			new_node->state = HCLGE_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_move_tail(&mac_node->node, mac_list);
		}
	}
}
static void hclge_update_overflow_flags(struct hclge_vport *vport,
					enum HCLGE_MAC_ADDR_TYPE mac_type,
					bool is_all_added)
{
	if (mac_type == HCLGE_MAC_ADDR_UC) {
		if (is_all_added)
			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
		else if (hclge_is_umv_space_full(vport, true))
			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
	} else {
		if (is_all_added)
			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
		else
			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
	}
}
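/* Table sync splits work around the mac_list_lock: collect TO_ADD/TO_DEL
 * nodes into temporary lists under the spinlock, program hardware with
 * the lock dropped, then merge the results back while honouring any
 * set_rx_mode changes that raced in between.
 */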
static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
				       enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;
	bool all_added;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addr to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addr outside the spin lock
	 */
	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
		&vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGE_MAC_TO_DEL:
			list_move_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;
			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&vport->mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
	hclge_sync_vport_mac_list(vport, &tmp_add_list, mac_type);

	/* if some mac addresses were added/deleted fail, move back to the
	 * mac_list, and retry at next time.
	 */
	spin_lock_bh(&vport->mac_list_lock);

	hclge_sync_from_del_list(&tmp_del_list, list);
	all_added = hclge_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_update_overflow_flags(vport, mac_type, all_added);
}
static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	if (test_bit(vport->vport_id, hdev->vport_config_block))
		return false;

	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
		return true;

	return false;
}

static void hclge_sync_mac_table(struct hclge_dev *hdev)
{
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (!hclge_need_sync_mac_table(vport))
			continue;

		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
	}
}
static void hclge_build_del_list(struct list_head *list,
				 bool is_del_list,
				 struct list_head *tmp_del_list)
{
	struct hclge_mac_node *mac_cfg, *tmp;

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		switch (mac_cfg->state) {
		case HCLGE_MAC_TO_DEL:
		case HCLGE_MAC_ACTIVE:
			list_move_tail(&mac_cfg->node, tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			if (is_del_list) {
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
			}
			break;
		}
	}
}

static void hclge_unsync_del_list(struct hclge_vport *vport,
				  int (*unsync)(struct hclge_vport *vport,
						const unsigned char *addr),
				  bool is_del_list,
				  struct list_head *tmp_del_list)
{
	struct hclge_mac_node *mac_cfg, *tmp;
	int ret;

	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
		ret = unsync(vport, mac_cfg->mac_addr);
		if (!ret || ret == -ENOENT) {
			/* clear all mac addr from hardware, but remain these
			 * mac addr in the mac list, and restore them after
			 * vf reset finished.
			 */
			if (!is_del_list &&
			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
				mac_cfg->state = HCLGE_MAC_TO_ADD;
			} else {
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
			}
		} else if (is_del_list) {
			mac_cfg->state = HCLGE_MAC_TO_DEL;
		}
	}
}
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;

	if (mac_type == HCLGE_MAC_ADDR_UC) {
		list = &vport->uc_mac_list;
		unsync = hclge_rm_uc_addr_common;
	} else {
		list = &vport->mc_mac_list;
		unsync = hclge_rm_mc_addr_common;
	}

	INIT_LIST_HEAD(&tmp_del_list);

	if (!is_del_list)
		set_bit(vport->vport_id, hdev->vport_config_block);

	spin_lock_bh(&vport->mac_list_lock);

	hclge_build_del_list(list, is_del_list, &tmp_del_list);

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);

	spin_lock_bh(&vport->mac_list_lock);

	hclge_sync_from_del_list(&tmp_del_list, list);

	spin_unlock_bh(&vport->mac_list_lock);
}
/* remove all mac addresses when uninitializing */
static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
					enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_mac_node *mac_node, *tmp;
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;

	INIT_LIST_HEAD(&tmp_del_list);

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
		&vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGE_MAC_TO_DEL:
		case HCLGE_MAC_ACTIVE:
			list_move_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			list_del(&mac_node->node);
			kfree(mac_node);
			break;
		}
	}

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);

	if (!list_empty(&tmp_del_list))
		dev_warn(&hdev->pdev->dev,
			 "uninit %s mac list for vport %u not completely.\n",
			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
			 vport->vport_id);

	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}

static void hclge_uninit_mac_table(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
	}
}
static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
#define HCLGE_ETHERTYPE_ALREADY_ADD		1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
#define HCLGE_ETHERTYPE_KEY_CONFLICT		3

	int return_status;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
			cmdq_resp);
		return -EIO;
	}

	switch (resp_code) {
	case HCLGE_ETHERTYPE_SUCCESS_ADD:
	case HCLGE_ETHERTYPE_ALREADY_ADD:
		return_status = 0;
		break;
	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for manager table overflow.\n");
		return_status = -EIO;
		break;
	case HCLGE_ETHERTYPE_KEY_CONFLICT:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for key conflict.\n");
		return_status = -EIO;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for undefined, code=%u.\n",
			resp_code);
		return_status = -EIO;
	}

	return return_status;
}
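/* PF-side configuration of a VF MAC address: the new address only takes
 * effect after the VF is reset, so an alive VF is asked to reset itself.
 */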
static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
			    u8 *mac_addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclge_dev *hdev = vport->back;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	hnae3_format_mac_addr(format_mac_addr, mac_addr);
	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
		dev_info(&hdev->pdev->dev,
			 "Specified MAC(=%s) is same as before, no change committed!\n",
			 format_mac_addr);
		return 0;
	}

	ether_addr_copy(vport->vf_info.mac, mac_addr);

	/* there is a timewindow for PF to know VF unalive, it may
	 * cause send mailbox fail, but it doesn't matter, VF will
	 * query it when reinit.
	 */
	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		dev_info(&hdev->pdev->dev,
			 "MAC of VF %d has been set to %s, and it will be reinitialized!\n",
			 vf, format_mac_addr);
		(void)hclge_inform_reset_assert_to_vf(vport);
		return 0;
	}

	dev_info(&hdev->pdev->dev,
		 "MAC of VF %d has been set to %s, will be active after VF reset\n",
		 vf, format_mac_addr);
	return 0;
}
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}

static int init_mgr_tbl(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"add mac ethertype failed, ret =%d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}
static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
				       const u8 *old_addr, const u8 *new_addr)
{
	struct list_head *list = &vport->uc_mac_list;
	struct hclge_mac_node *old_node, *new_node;

	new_node = hclge_find_mac_node(list, new_addr);
	if (!new_node) {
		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
		if (!new_node)
			return -ENOMEM;

		new_node->state = HCLGE_MAC_TO_ADD;
		ether_addr_copy(new_node->mac_addr, new_addr);
		list_add(&new_node->node, list);
	} else {
		if (new_node->state == HCLGE_MAC_TO_DEL)
			new_node->state = HCLGE_MAC_ACTIVE;

		/* make sure the new addr is in the list head, to avoid the
		 * dev addr not being re-added into the mac table for the umv
		 * space limitation after global/imp reset, which clears the
		 * mac table by hardware.
		 */
		list_move(&new_node->node, list);
	}

	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
		old_node = hclge_find_mac_node(list, old_addr);
		if (old_node) {
			if (old_node->state == HCLGE_MAC_TO_ADD) {
				list_del(&old_node->node);
				kfree(old_node);
			} else {
				old_node->state = HCLGE_MAC_TO_DEL;
			}
		}
	}

	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	return 0;
}
static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
			      bool is_first)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclge_dev *hdev = vport->back;
	unsigned char *old_addr = NULL;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		hnae3_format_mac_addr(format_mac_addr, new_addr);
		dev_err(&hdev->pdev->dev,
			"change uc mac err! invalid mac: %s.\n",
			format_mac_addr);
		return -EINVAL;
	}

	ret = hclge_pause_addr_cfg(hdev, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to configure mac pause address, ret = %d\n",
			ret);
		return ret;
	}

	if (!is_first)
		old_addr = hdev->hw.mac.mac_addr;

	spin_lock_bh(&vport->mac_list_lock);
	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
	if (ret) {
		hnae3_format_mac_addr(format_mac_addr, new_addr);
		dev_err(&hdev->pdev->dev,
			"failed to change the mac addr:%s, ret = %d\n",
			format_mac_addr, ret);
		spin_unlock_bh(&vport->mac_list_lock);

		if (!is_first)
			hclge_pause_addr_cfg(hdev, old_addr);

		return ret;
	}
	/* we must update dev addr with spin lock protect, preventing dev addr
	 * being removed by set_rx_mode path.
	 */
	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
	spin_unlock_bh(&vport->mac_list_lock);

	hclge_task_schedule(hdev, 0);

	return 0;
}
static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);

	if (!hnae3_dev_phy_imp_supported(hdev))
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hdev->hw.mac.phy_addr;
		/* this command reads phy id and register at the same time */
		fallthrough;
	case SIOCGMIIREG:
		data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
		return 0;

	case SIOCSMIIREG:
		return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
	default:
		return -EOPNOTSUPP;
	}
}

static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
			  int cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return hclge_ptp_get_cfg(hdev, ifr);
	case SIOCSHWTSTAMP:
		return hclge_ptp_set_cfg(hdev, ifr);
	default:
		if (!hdev->hw.mac.phydev)
			return hclge_mii_ioctl(hdev, ifr, cmd);
	}

	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}
static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
					     bool enable)
{
	struct hclge_port_vlan_filter_bypass_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
	req->vf_id = vf_id;
	hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
		      enable ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set vport%u port vlan filter bypass state, ret = %d.\n",
			vf_id, ret);

	return ret;
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      u8 fe_type, bool filter_en, u8 vf_id)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	/* read current vlan filter parameter */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n",
			vf_id, ret);
		return ret;
	}

	/* modify and write new config parameter */
	hclge_comm_cmd_reuse_desc(&desc, false);
	req->vlan_fe = filter_en ?
			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n",
			vf_id, ret);

	return ret;
}
static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						  HCLGE_FILTER_FE_EGRESS_V1_B,
						  enable, vport->vport_id);

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					 HCLGE_FILTER_FE_EGRESS, enable,
					 vport->vport_id);
	if (ret)
		return ret;

	if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
		ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
							!enable);
	} else if (!vport->vport_id) {
		if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
			enable = false;

		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
						 HCLGE_FILTER_FE_INGRESS,
						 enable, 0);
	}

	return ret;
}
static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
{
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	if (vport->vport_id) {
		if (vport->port_base_vlan_cfg.state !=
			HNAE3_PORT_BASE_VLAN_DISABLE)
			return true;

		if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
			return false;
	} else if (handle->netdev_flags & HNAE3_USER_UPE) {
		return false;
	}

	if (!vport->req_vlan_fltr_en)
		return false;

	/* compatible with former device, always enable vlan filter */
	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
		return true;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
		if (vlan->vlan_id != 0)
			return true;

	return false;
}
int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
{
	struct hclge_dev *hdev = vport->back;
	bool need_en;
	int ret;

	mutex_lock(&hdev->vport_lock);

	vport->req_vlan_fltr_en = request_en;

	need_en = hclge_need_enable_vport_vlan_filter(vport);
	if (need_en == vport->cur_vlan_fltr_en) {
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	ret = hclge_set_vport_vlan_filter(vport, need_en);
	if (ret) {
		mutex_unlock(&hdev->vport_lock);
		return ret;
	}

	vport->cur_vlan_fltr_en = need_en;

	mutex_unlock(&hdev->vport_lock);

	return 0;
}

static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_enable_vport_vlan_filter(vport, enable);
}
static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
					bool is_kill, u16 vlan,
					struct hclge_desc *desc)
{
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
					  bool is_kill, struct hclge_desc *desc)
{
	struct hclge_vlan_filter_vf_cfg_cmd *req;

	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY	2
		if (!req->resp_code || req->resp_code == 1)
			return 0;

		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
			set_bit(vfid, hdev->vf_vlan_full);
			dev_warn(&hdev->pdev->dev,
				 "vf vlan table is full, vf vlan filter is disabled\n");
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%u.\n",
			req->resp_code);
	} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND	1
		if (!req->resp_code)
			return 0;

		/* vf vlan filter is disabled when vf vlan table is full,
		 * then new vlan id will not be added into vf vlan table.
		 * Just return 0 without warning, to avoid massive verbose
		 * print logs when unload.
		 */
		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%u.\n",
			req->resp_code);
	}

	return -EIO;
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
				    bool is_kill, u16 vlan)
{
	struct hclge_vport *vport = &hdev->vport[vfid];
	struct hclge_desc desc[2];
	int ret;

	/* if vf vlan table is full, firmware will close vf vlan filter, it
	 * is impossible and unnecessary to add a new vlan id to the vf vlan
	 * filter. If spoof check is enabled, and vf vlan is full, it
	 * shouldn't add a new vlan, because tx packets with these vlan id
	 * will be dropped.
	 */
	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
		if (vport->vf_info.spoofchk && vlan) {
			dev_err(&hdev->pdev->dev,
				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
			return -EPERM;
		}
		return 0;
	}

	ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
	if (ret)
		return ret;

	return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
}
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
				      u16 vlan_id, bool is_kill)
{
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
			   HCLGE_VLAN_BYTE_SIZE;
	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n", ret);
	return ret;
}
static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id,
					u16 vlan_id, bool is_kill)
{
	/* vlan 0 may be added twice when 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return false;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_warn(&hdev->pdev->dev,
			 "Add port vlan failed, vport %u is already in vlan %u\n",
			 vport_id, vlan_id);
		return false;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_warn(&hdev->pdev->dev,
			 "Delete port vlan failed, vport %u is not in vlan %u\n",
			 vport_id, vlan_id);
		return false;
	}

	return true;
}
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret;

	if (is_kill && !vlan_id)
		return 0;

	if (vlan_id >= VLAN_N_VID)
		return -EINVAL;

	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %u vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill))
		return 0;

	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}
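/* The Tx/Rx vtag config commands below program per-vport VLAN tag
 * insertion, acceptance and stripping from the cached txvlan_cfg and
 * rxvlan_cfg, addressing the vport through a vf_offset plus bitmap pair.
 */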
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u16 bmap_index;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
		      vcfg->accept_tag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
		      vcfg->accept_untag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
		      vcfg->accept_tag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
		      vcfg->accept_untag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
		      vcfg->insert_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
		      vcfg->insert_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
		      vcfg->tag_shift_mode_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
			HCLGE_VF_NUM_PER_BYTE;
	req->vf_bitmap[bmap_index] =
		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port txvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u16 bmap_index;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		      vcfg->strip_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		      vcfg->strip_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		      vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		      vcfg->vlan2_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
		      vcfg->strip_tag1_discard_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
		      vcfg->strip_tag2_discard_en ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
			HCLGE_VF_NUM_PER_BYTE;
	req->vf_bitmap[bmap_index] =
		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
				  u16 port_base_vlan_state,
				  u16 vlan_tag, u8 qos)
{
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->txvlan_cfg.accept_tag1 = true;
		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
	} else {
		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);

		vport->txvlan_cfg.accept_tag1 =
			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
		vport->txvlan_cfg.insert_tag1_en = true;
		vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
						 vlan_tag;
	}

	vport->txvlan_cfg.accept_untag1 = true;

	/* accept_tag2 and accept_untag2 are not supported on
	 * pdev revision(0x20), new revision support them,
	 * these two fields cannot be configured by the user.
	 */
	vport->txvlan_cfg.accept_tag2 = true;
	vport->txvlan_cfg.accept_untag2 = true;
	vport->txvlan_cfg.insert_tag2_en = false;
	vport->txvlan_cfg.default_tag2 = 0;
	vport->txvlan_cfg.tag_shift_mode_en = true;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
		vport->rxvlan_cfg.strip_tag2_discard_en = false;
	} else {
		vport->rxvlan_cfg.strip_tag1_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
		vport->rxvlan_cfg.strip_tag2_en = true;
		vport->rxvlan_cfg.strip_tag2_discard_en = true;
	}

	vport->rxvlan_cfg.strip_tag1_discard_en = false;
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	ret = hclge_set_vlan_tx_offload_cfg(vport);
	if (ret)
		return ret;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
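/* Programs the VLAN TPIDs used for Rx parsing and Tx insertion; with the
 * defaults set by hclge_init_vlan_type() every position uses
 * ETH_P_8021Q.
 */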
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_init_vlan_filter(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int ret;
	int i;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						  HCLGE_FILTER_FE_EGRESS_V1_B,
						  true, 0);

	/* for revision 0x21, vf vlan filter is per function */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS, true,
						 vport->vport_id);
		if (ret)
			return ret;
		vport->cur_vlan_fltr_en = true;
	}

	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
					  HCLGE_FILTER_FE_INGRESS, true, 0);
}

static int hclge_init_vlan_type(struct hclge_dev *hdev)
{
	hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q;
	hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q;
	hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q;

	return hclge_set_vlan_protocol_type(hdev);
}
static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev)
{
	struct hclge_port_base_vlan_config *cfg;
	struct hclge_vport *vport;
	int ret;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		cfg = &vport->port_base_vlan_cfg;

		ret = hclge_vlan_offload_cfg(vport, cfg->state,
					     cfg->vlan_info.vlan_tag,
					     cfg->vlan_info.qos);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	int ret;

	ret = hclge_init_vlan_filter(hdev);
	if (ret)
		return ret;

	ret = hclge_init_vlan_type(hdev);
	if (ret)
		return ret;

	ret = hclge_init_vport_vlan_offload(hdev);
	if (ret)
		return ret;

	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				       bool writen_to_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	mutex_lock(&hdev->vport_lock);

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			mutex_unlock(&hdev->vport_lock);
			return;
		}
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan) {
		mutex_unlock(&hdev->vport_lock);
		return;
	}

	vlan->hd_tbl_status = writen_to_tbl;
	vlan->vlan_id = vlan_id;

	list_add_tail(&vlan->node, &vport->vlan_list);
	mutex_unlock(&hdev->vport_lock);
}

static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	mutex_lock(&hdev->vport_lock);

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (!vlan->hd_tbl_status) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, false);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"restore vport vlan list failed, ret=%d\n",
					ret);

				mutex_unlock(&hdev->vport_lock);
				return ret;
			}
		}
		vlan->hd_tbl_status = true;
	}

	mutex_unlock(&hdev->vport_lock);

	return 0;
}
static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				      bool is_write_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	mutex_lock(&hdev->vport_lock);

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			if (is_write_tbl && vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan_id,
							 true);

			list_del(&vlan->node);
			kfree(vlan);
			break;
		}
	}

	mutex_unlock(&hdev->vport_lock);
}

void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	mutex_lock(&hdev->vport_lock);

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->hd_tbl_status)
			hclge_set_vlan_filter_hw(hdev,
						 htons(ETH_P_8021Q),
						 vport->vport_id,
						 vlan->vlan_id,
						 true);

		vlan->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
	clear_bit(vport->vport_id, hdev->vf_vlan_full);
	mutex_unlock(&hdev->vport_lock);
}
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_lock);

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}

	mutex_unlock(&hdev->vport_lock);
}
void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev)
{
	struct hclge_vlan_info *vlan_info;
	struct hclge_vport *vport;
	u16 vlan_proto;
	u16 vlan_id;
	u16 state;
	int vf_id;
	int ret;

	/* PF should restore all vfs port base vlan */
	for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) {
		vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM];
		vlan_info = vport->port_base_vlan_cfg.tbl_sta ?
			    &vport->port_base_vlan_cfg.vlan_info :
			    &vport->port_base_vlan_cfg.old_vlan_info;

		vlan_id = vlan_info->vlan_tag;
		vlan_proto = vlan_info->vlan_proto;
		state = vport->port_base_vlan_cfg.state;

		if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
			clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
			ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
						       vport->vport_id,
						       vlan_id, false);
			vport->port_base_vlan_cfg.tbl_sta = ret == 0;
		}
	}
}
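/* After a reset has cleared the hardware table, replay the software vlan
 * list into hardware; entries are only rewritten while port based vlan
 * is disabled, since otherwise the port based vlan is the filter entry.
 */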
void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	mutex_lock(&hdev->vport_lock);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, false);
			if (ret)
				break;
			vlan->hd_tbl_status = true;
		}
	}

	mutex_unlock(&hdev->vport_lock);
}
/* For global reset and imp reset, hardware will clear the mac table,
 * so we change the mac address state from ACTIVE to TO_ADD, then they
 * can be restored in the service task after reset complete. Furthermore,
 * the mac addresses with state TO_DEL or DEL_FAIL are unnecessary to
 * be restored after reset, so just remove these mac nodes from mac_list.
 */
static void hclge_mac_node_convert_for_reset(struct list_head *list)
{
	struct hclge_mac_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		if (mac_node->state == HCLGE_MAC_ACTIVE) {
			mac_node->state = HCLGE_MAC_TO_ADD;
		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

void hclge_restore_mac_table_common(struct hclge_vport *vport)
{
	spin_lock_bh(&vport->mac_list_lock);

	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	spin_unlock_bh(&vport->mac_list_lock);
}
static void hclge_restore_hw_table(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = &hdev->vport[0];
	struct hnae3_handle *handle = &vport->nic;

	hclge_restore_mac_table_common(vport);
	hclge_restore_vport_port_base_vlan_config(hdev);
	hclge_restore_vport_vlan_table(vport);
	set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
	hclge_restore_fd_entries(handle);
}
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = enable;
		vport->rxvlan_cfg.strip_tag2_discard_en = false;
	} else {
		vport->rxvlan_cfg.strip_tag1_en = enable;
		vport->rxvlan_cfg.strip_tag2_en = true;
		vport->rxvlan_cfg.strip_tag2_discard_en = true;
	}

	vport->rxvlan_cfg.strip_tag1_discard_en = false;
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
	vport->rxvlan_cfg.rx_vlan_offload_en = enable;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
		set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
}
static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
					    u16 port_base_vlan_state,
					    struct hclge_vlan_info *new_info,
					    struct hclge_vlan_info *old_info)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
		hclge_rm_vport_all_vlan_table(vport, false);
		/* force clear VLAN 0 */
		ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
		if (ret)
			return ret;
		return hclge_set_vlan_filter_hw(hdev,
						htons(new_info->vlan_proto),
						vport->vport_id,
						new_info->vlan_tag,
						false);
	}

	vport->port_base_vlan_cfg.tbl_sta = false;

	/* force add VLAN 0 */
	ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
	if (ret)
		return ret;

	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
				       vport->vport_id, old_info->vlan_tag,
				       true);
	if (ret)
		return ret;

	return hclge_add_vport_all_vlan_table(vport);
}

static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
					  const struct hclge_vlan_info *old_cfg)
{
	if (new_cfg->vlan_tag != old_cfg->vlan_tag)
		return true;

	if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
		return true;

	return false;
}
static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
					   struct hclge_vlan_info *new_info,
					   struct hclge_vlan_info *old_info)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* add new VLAN tag */
	ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto),
				       vport->vport_id, new_info->vlan_tag,
				       false);
	if (ret)
		return ret;

	vport->port_base_vlan_cfg.tbl_sta = false;
	/* remove old VLAN tag */
	if (old_info->vlan_tag == 0)
		ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
					       true, 0);
	else
		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
					       vport->vport_id,
					       old_info->vlan_tag, true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clear vport%u port base vlan %u, ret = %d.\n",
			vport->vport_id, old_info->vlan_tag, ret);

	return ret;
}
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_vlan_info *old_vlan_info;
	int ret;

	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;

	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
				     vlan_info->qos);
	if (ret)
		return ret;

	if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
		goto out;

	if (state == HNAE3_PORT_BASE_VLAN_MODIFY)
		ret = hclge_modify_port_base_vlan_tag(vport, vlan_info,
						      old_vlan_info);
	else
		ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
						       old_vlan_info);
	if (ret)
		return ret;

out:
	vport->port_base_vlan_cfg.state = state;
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

	vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info;
	vport->port_base_vlan_cfg.vlan_info = *vlan_info;
	vport->port_base_vlan_cfg.tbl_sta = true;
	hclge_set_vport_vlan_fltr_change(vport);

	return 0;
}
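/* Maps the current port based VLAN state and the requested vlan/qos pair
 * to the action to take: NOCHANGE, ENABLE, DISABLE or MODIFY.
 */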
static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
					  enum hnae3_port_base_vlan_state state,
					  u16 vlan, u8 qos)
{
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		if (!vlan && !qos)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;

		return HNAE3_PORT_BASE_VLAN_ENABLE;
	}

	if (!vlan && !qos)
		return HNAE3_PORT_BASE_VLAN_DISABLE;

	if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
	    vport->port_base_vlan_cfg.vlan_info.qos == qos)
		return HNAE3_PORT_BASE_VLAN_NOCHANGE;

	return HNAE3_PORT_BASE_VLAN_MODIFY;
}
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	u16 state;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	vport = hclge_get_vf_vport(hdev, vfid);
	if (!vport)
		return -EINVAL;

	/* qos is a 3 bits value, so can not be bigger than 7 */
	if (vlan > VLAN_N_VID - 1 || qos > 7)
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	state = hclge_get_port_base_vlan_state(vport,
					       vport->port_base_vlan_cfg.state,
					       vlan, qos);
	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
		return 0;

	vlan_info.vlan_tag = vlan;
	vlan_info.qos = qos;
	vlan_info.vlan_proto = ntohs(proto);

	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to update port base vlan for vf %d, ret = %d\n",
			vfid, ret);
		return ret;
	}

	/* there is a timewindow for PF to know VF unalive, it may
	 * cause send mailbox fail, but it doesn't matter, VF will
	 * query it when reinit.
	 * for DEVICE_VERSION_V3, vf doesn't need to know about the port based
	 * VLAN state, so no notification is needed here.
	 */
	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			(void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
								vport->vport_id,
								state,
								&vlan_info);
		else
			set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN,
				&vport->need_notify);
	}

	return 0;
}
10420 static void hclge_clear_vf_vlan(struct hclge_dev
*hdev
)
10422 struct hclge_vlan_info
*vlan_info
;
10423 struct hclge_vport
*vport
;
10427 /* clear port base vlan for all vf */
10428 for (vf
= HCLGE_VF_VPORT_START_NUM
; vf
< hdev
->num_alloc_vport
; vf
++) {
10429 vport
= &hdev
->vport
[vf
];
10430 vlan_info
= &vport
->port_base_vlan_cfg
.vlan_info
;
10432 ret
= hclge_set_vlan_filter_hw(hdev
, htons(ETH_P_8021Q
),
10434 vlan_info
->vlan_tag
, true);
10436 dev_err(&hdev
->pdev
->dev
,
10437 "failed to clear vf vlan for vf%d, ret = %d\n",
10438 vf
- HCLGE_VF_VPORT_START_NUM
, ret
);
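/* Entry point for 8021q VLAN add/kill requests from the stack. When a
 * reset is in progress the hardware cannot be touched, so the vlan id is
 * only recorded in vlan_del_fail_bmap and removed later by the periodic
 * sync task.
 */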
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool writen_to_tbl = false;
	int ret = 0;

	/* When device is resetting or reset failed, firmware is unable to
	 * handle mailbox. Just record the vlan id, and remove it after
	 * reset finished.
	 */
	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
		return -EBUSY;
	}

	/* when port base vlan enabled, we use port base vlan as the vlan
	 * filter entry. In this case, we don't update vlan filter table
	 * when user add new vlan or remove exist vlan, just update the vport
	 * vlan list. The vlan id in vlan list will be written in vlan filter
	 * table until port base vlan disabled
	 */
	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
					       vlan_id, is_kill);
		writen_to_tbl = true;
	}

	if (!ret) {
		if (!is_kill)
			hclge_add_vport_vlan_table(vport, vlan_id,
						   writen_to_tbl);
		else if (is_kill && vlan_id != 0)
			hclge_rm_vport_vlan_table(vport, vlan_id, false);
	} else if (is_kill) {
		/* when remove hw vlan filter failed, record the vlan id,
		 * and try to remove it from hw later, to be consistent
		 * with the stack
		 */
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
	}

	hclge_set_vport_vlan_fltr_change(vport);

	return ret;
}

static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int ret;
	u16 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
					&vport->state))
			continue;

		ret = hclge_enable_vport_vlan_filter(vport,
						     vport->req_vlan_fltr_en);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to sync vlan filter state for vport%u, ret = %d\n",
				vport->vport_id, ret);
			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
				&vport->state);
			return;
		}
	}
}
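/* Periodic service task helper: retry the VLAN deletions that failed
 * earlier (recorded in vlan_del_fail_bmap) and then sync the per-vport
 * VLAN filter enable state.
 */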
static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
{
#define HCLGE_MAX_SYNC_COUNT	60

	int i, ret, sync_cnt = 0;
	u16 vlan_id;

	/* start from vport 1 for PF is always alive */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
					 VLAN_N_VID);
		while (vlan_id != VLAN_N_VID) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id, vlan_id,
						       true);
			if (ret && ret != -EINVAL)
				return;

			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
			hclge_rm_vport_vlan_table(vport, vlan_id, false);
			hclge_set_vport_vlan_fltr_change(vport);

			sync_cnt++;
			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
				return;

			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
						 VLAN_N_VID);
		}
	}

	hclge_sync_vlan_fltr_state(hdev);
}

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}
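/* Set the MTU of a vport. The resulting frame size is validated against
 * the device limits, a VF may not exceed the PF's mps, and the PF in turn
 * may not shrink below the mps already granted to any VF.
 */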
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret;

	/* HW supports 2 layers of vlan */
	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be greater than VF's mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			dev_err(&hdev->pdev->dev,
				"failed to set pf mtu for less than vport %d, mps = %u.\n",
				i, hdev->vport[i].mps);
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}
static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id);
	if (enable)
		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
				  u8 *reset_status)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	*reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);

	return 0;
}

u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_comm_tqp *tqp;
	struct hnae3_queue *queue;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_comm_tqp, q);

	return tqp->index;
}
static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 reset_try_times = 0;
	u8 reset_status;
	u16 queue_gid;
	int ret;
	u16 i;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		queue_gid = hclge_covert_handle_qid_global(handle, i);
		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to send reset tqp cmd, ret = %d\n",
				ret);
			return ret;
		}

		while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
			ret = hclge_get_reset_status(hdev, queue_gid,
						     &reset_status);
			if (ret)
				return ret;

			if (reset_status)
				break;

			/* Wait for tqp hw reset */
			usleep_range(1000, 1200);
		}

		if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
			dev_err(&hdev->pdev->dev,
				"wait for tqp hw reset timeout\n");
			return -ETIME;
		}

		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to deassert soft reset, ret = %d\n",
				ret);
			return ret;
		}
		reset_try_times = 0;
	}

	return 0;
}
static int hclge_reset_rcb(struct hnae3_handle *handle)
{
#define HCLGE_RESET_RCB_NOT_SUPPORT	0U
#define HCLGE_RESET_RCB_SUCCESS		1U

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_reset_cmd *req;
	struct hclge_desc desc;
	u8 return_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(handle, 0);

	req = (struct hclge_reset_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
	req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
	req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to send rcb reset cmd, ret = %d\n", ret);
		return ret;
	}

	return_status = req->fun_reset_rcb_return_status;
	if (return_status == HCLGE_RESET_RCB_SUCCESS)
		return 0;

	if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
		dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
			return_status);
		return -EIO;
	}

	/* if reset rcb cmd is unsupported, we need to send reset tqp cmd
	 * again to reset all tqps
	 */
	return hclge_reset_tqp_cmd(handle);
}
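/* Reset all task queue pairs of the handle. The PF additionally disables
 * its tqps first; the actual reset goes through the RCB reset command,
 * with a per-queue fallback for firmware that does not support it.
 */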
int hclge_reset_tqp(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* only need to disable PF's tqp */
	if (!vport->vport_id) {
		ret = hclge_tqp_enable(handle, false);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to disable tqp, ret = %d\n", ret);
			return ret;
		}
	}

	return hclge_reset_rcb(handle);
}

static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}

static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phy_set_asym_pause(phydev, rx_en, tx_en);
}

static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"configure pauseparam error, ret = %d.\n", ret);

	return ret;
}
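/* Resolve the flow control mode negotiated by the PHY (local vs. link
 * partner advertisement) and program the MAC pause configuration
 * accordingly.
 */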
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link)
		return 0;

	if (!phydev->autoneg)
		return hclge_mac_pause_setup_hw(hdev);

	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}

static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 media_type = hdev->hw.mac.media_type;

	*auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
		    hclge_get_autoneg(handle) : 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}
static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
					 u32 rx_en, u32 tx_en)
{
	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
}

static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
		fc_autoneg = hclge_get_autoneg(handle);
		if (auto_neg != fc_autoneg) {
			dev_info(&hdev->pdev->dev,
				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
			return -EOPNOTSUPP;
		}
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	hclge_record_user_pauseparam(hdev, rx_en, tx_en);

	if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	if (phydev)
		return phy_start_aneg(phydev);

	return -EOPNOTSUPP;
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed,
					  u8 *duplex, u32 *lane_num)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
	if (lane_num)
		*lane_num = hdev->hw.mac.lane_num;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				 u8 *module_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* When the nic is down, the service task is not running, so it does
	 * not update the port information every second. Query the port
	 * information before returning the media type to ensure the media
	 * information is correct.
	 */
	hclge_update_port_info(hdev);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, is_resolved;
	unsigned int retval;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
static void hclge_info_show(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport->nic;
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "PF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
	dev_info(dev, "This is %s PF\n",
		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
	dev_info(dev, "DCB %s\n",
		 handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable");
	dev_info(dev, "MQPRIO %s\n",
		 handle->kinfo.tc_info.mqprio_active ? "enable" : "disable");
	dev_info(dev, "Default tx spare buffer size: %u\n",
		 hdev->tx_spare_buf_size);

	dev_info(dev, "PF info end.\n");
}
static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					  struct hclge_vport *vport)
{
	struct hnae3_client *client = vport->nic.client;
	struct hclge_dev *hdev = ae_dev->priv;
	int rst_cnt = hdev->rst_stats.reset_cnt;
	int ret;

	ret = client->ops->init_instance(&vport->nic);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_nic_err;
	}

	/* Enable nic hw error interrupts */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto init_nic_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->vport->nic))
		hclge_info_show(hdev);

	return 0;

init_nic_err:
	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	client->ops->uninit_instance(&vport->nic, 0);

	return ret;
}

static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					   struct hclge_vport *vport)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hnae3_client *client;
	int rst_cnt;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	client = hdev->roce_client;
	ret = hclge_init_roce_base_info(vport);
	if (ret)
		return ret;

	rst_cnt = hdev->rst_stats.reset_cnt;
	ret = client->ops->init_instance(&vport->roce);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_roce_err;
	}

	/* Enable roce ras interrupts */
	ret = hclge_config_rocee_ras_interrupt(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable roce ras interrupts\n", ret);
		goto init_roce_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;

init_roce_err:
	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);

	return ret;
}
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport = &hdev->vport[0];
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		vport->nic.client = client;
		ret = hclge_init_nic_client_instance(ae_dev, vport);
		if (ret)
			goto clear_nic;

		ret = hclge_init_roce_client_instance(ae_dev, vport);
		if (ret)
			goto clear_roce;

		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			vport->roce.client = client;
		}

		ret = hclge_init_roce_client_instance(ae_dev, vport);
		if (ret)
			goto clear_roce;

		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}

static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport = &hdev->vport[0];

	if (hdev->roce_client) {
		clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
			msleep(HCLGE_WAIT_RESET_DONE);

		hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
		hdev->roce_client = NULL;
		vport->roce.client = NULL;
	}
	if (client->type == HNAE3_CLIENT_ROCE)
		return;
	if (hdev->nic_client && client->ops->uninit_instance) {
		clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
			msleep(HCLGE_WAIT_RESET_DONE);

		client->ops->uninit_instance(&vport->nic, 0);
		hdev->nic_client = NULL;
		vport->nic.client = NULL;
	}
}
static int hclge_dev_mem_map(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw = &hdev->hw;

	/* for a device that does not have device memory, return directly */
	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
		return 0;

	hw->hw.mem_base =
		devm_ioremap_wc(&pdev->dev,
				pci_resource_start(pdev, HCLGE_MEM_BAR),
				pci_resource_len(pdev, HCLGE_MEM_BAR));
	if (!hw->hw.mem_base) {
		dev_err(&pdev->dev, "failed to map device memory\n");
		return -EFAULT;
	}

	return 0;
}
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hw.io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->hw.io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_release_regions;
	}

	ret = hclge_dev_mem_map(hdev);
	if (ret)
		goto err_unmap_io_base;

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_unmap_io_base:
	pcim_iounmap(pdev, hdev->hw.hw.io_base);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	if (hdev->hw.hw.mem_base)
		devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);

	pcim_iounmap(pdev, hdev->hw.hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	set_bit(HCLGE_STATE_REMOVING, &hdev->state);

	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);
}
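/* Prepare the device for an externally triggered reset (e.g. FLR).
 * Preparation is retried a few times because a pending reset may have to
 * be handled first; the misc vector and command queue are disabled until
 * hclge_reset_done() is called.
 */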
static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
#define HCLGE_RESET_RETRY_WAIT_MS	500
#define HCLGE_RESET_RETRY_CNT	5

	struct hclge_dev *hdev = ae_dev->priv;
	int retry_cnt = 0;
	int ret;

	while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
		down(&hdev->reset_sem);
		set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
		hdev->reset_type = rst_type;
		ret = hclge_reset_prepare(hdev);
		if (!ret && !hdev->reset_pending)
			break;

		dev_err(&hdev->pdev->dev,
			"failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
			ret, hdev->reset_pending, retry_cnt);
		clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
		up(&hdev->reset_sem);
		msleep(HCLGE_RESET_RETRY_WAIT_MS);
	}

	/* disable misc vector before reset done */
	hclge_enable_vector(&hdev->misc_vector, false);
	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);

	if (hdev->reset_type == HNAE3_FLR_RESET)
		hdev->rst_stats.flr_rst_cnt++;
}

static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	int ret;

	hclge_enable_vector(&hdev->misc_vector, true);

	ret = hclge_reset_rebuild(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}
static void hclge_clear_resetting_state(struct hclge_dev *hdev)
{
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to clear vport's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "clear vport(%u) rst failed %d!\n",
				 vport->vport_id, ret);
	}
}

static int hclge_clear_hw_resource(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	/* This new command is only supported by new firmware, it will
	 * fail with older firmware. Error value -EOPNOTSUPP can only be
	 * returned by older firmware running this command, to keep code
	 * backward compatible we will override this value and return
	 * success.
	 */
	if (ret && ret != -EOPNOTSUPP) {
		dev_err(&hdev->pdev->dev,
			"failed to clear hw resource, ret = %d\n", ret);
		return ret;
	}

	return 0;
}

static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
{
	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
}

static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
{
	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
}
static struct hclge_wol_info *hclge_get_wol_info(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return &vport->back->hw.mac.wol;
}

static int hclge_get_wol_supported_mode(struct hclge_dev *hdev,
					u32 *wol_supported)
{
	struct hclge_query_wol_supported_cmd *wol_supported_cmd;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_GET_SUPPORTED_MODE,
				   true);
	wol_supported_cmd = (struct hclge_query_wol_supported_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query wol supported, ret = %d\n", ret);
		return ret;
	}

	*wol_supported = le32_to_cpu(wol_supported_cmd->supported_wake_mode);

	return 0;
}

static int hclge_set_wol_cfg(struct hclge_dev *hdev,
			     struct hclge_wol_info *wol_info)
{
	struct hclge_wol_cfg_cmd *wol_cfg_cmd;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_CFG, false);
	wol_cfg_cmd = (struct hclge_wol_cfg_cmd *)desc.data;
	wol_cfg_cmd->wake_on_lan_mode = cpu_to_le32(wol_info->wol_current_mode);
	wol_cfg_cmd->sopass_size = wol_info->wol_sopass_size;
	memcpy(wol_cfg_cmd->sopass, wol_info->wol_sopass, SOPASS_MAX);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set wol config, ret = %d\n", ret);

	return ret;
}
static int hclge_update_wol(struct hclge_dev *hdev)
{
	struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;

	if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
		return 0;

	return hclge_set_wol_cfg(hdev, wol_info);
}

static int hclge_init_wol(struct hclge_dev *hdev)
{
	struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;
	int ret;

	if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
		return 0;

	memset(wol_info, 0, sizeof(struct hclge_wol_info));
	ret = hclge_get_wol_supported_mode(hdev,
					   &wol_info->wol_support_mode);
	if (ret) {
		wol_info->wol_support_mode = 0;
		return ret;
	}

	return hclge_update_wol(hdev);
}

static void hclge_get_wol(struct hnae3_handle *handle,
			  struct ethtool_wolinfo *wol)
{
	struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);

	wol->supported = wol_info->wol_support_mode;
	wol->wolopts = wol_info->wol_current_mode;
	if (wol_info->wol_current_mode & WAKE_MAGICSECURE)
		memcpy(wol->sopass, wol_info->wol_sopass, SOPASS_MAX);
}

static int hclge_set_wol(struct hnae3_handle *handle,
			 struct ethtool_wolinfo *wol)
{
	struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);
	struct hclge_vport *vport = hclge_get_vport(handle);
	u32 wol_mode;
	int ret;

	wol_mode = wol->wolopts;
	if (wol_mode & ~wol_info->wol_support_mode)
		return -EINVAL;

	wol_info->wol_current_mode = wol_mode;
	if (wol_mode & WAKE_MAGICSECURE) {
		memcpy(wol_info->wol_sopass, wol->sopass, SOPASS_MAX);
		wol_info->wol_sopass_size = SOPASS_MAX;
	} else {
		wol_info->wol_sopass_size = 0;
	}

	ret = hclge_set_wol_cfg(vport->back, wol_info);
	if (ret)
		wol_info->wol_current_mode = 0;

	return ret;
}
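/* Main initialization path of the PF driver: bring up PCI, the firmware
 * command queue, vports, MAC, VLAN, TM, RSS, flow director and PTP, then
 * enable the misc vector and schedule the periodic service task.
 */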
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;

	/* HW supports 2 layers of vlan */
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	spin_lock_init(&hdev->fd_rule_lock);
	sema_init(&hdev->reset_sem, 1);

	ret = hclge_pci_init(hdev);
	if (ret)
		goto out;

	ret = hclge_devlink_init(hdev);
	if (ret)
		goto err_pci_uninit;

	/* Firmware command queue initialize */
	ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
	if (ret)
		goto err_devlink_uninit;

	/* Firmware command initialize */
	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
				  true, hdev->reset_pending);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_clear_hw_resource(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_query_dev_specs(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret)
		goto err_msi_uninit;

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret)
		goto err_msi_irq_uninit;

	ret = hclge_map_tqp(hdev);
	if (ret)
		goto err_msi_irq_uninit;

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		if (hnae3_dev_phy_imp_supported(hdev))
			ret = hclge_update_tp_port_info(hdev);
		else
			ret = hclge_mac_mdio_config(hdev);

		if (ret)
			goto err_msi_irq_uninit;
	}

	ret = hclge_init_umv_space(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev,
				      &hdev->rss_cfg);
	if (ret) {
		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_ptp_init(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_update_port_info(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	INIT_KFIFO(hdev->mac_tnl_log);

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);

	hclge_clear_all_event_cause(hdev);
	hclge_clear_resetting_state(hdev);

	/* Log and clear the hw errors those already occurred */
	if (hnae3_dev_ras_imp_supported(hdev))
		hclge_handle_occurred_error(hdev);
	else
		hclge_handle_all_hns_hw_errors(ae_dev);

	/* request delayed reset for the error recovery because an immediate
	 * global reset on a PF affecting pending initialization of other PFs
	 */
	if (ae_dev->hw_err_reset_req) {
		enum hnae3_reset_type reset_level;

		reset_level = hclge_get_reset_level(ae_dev,
						    &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_level);
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
	}

	hclge_init_rxd_adv_layout(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	ret = hclge_init_wol(hdev);
	if (ret)
		dev_warn(&pdev->dev,
			 "failed to wake on lan init, ret = %d\n", ret);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
err_devlink_uninit:
	hclge_devlink_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.hw.io_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	mutex_destroy(&hdev->vport_lock);
	return ret;
}
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
	memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats));
}

static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
	return hclge_config_switch_param(hdev, vf, enable,
					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
}

static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					  HCLGE_FILTER_FE_NIC_INGRESS_B,
					  enable, vf);
}

static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
{
	int ret;

	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set vf %d mac spoof check %s failed, ret=%d\n",
			vf, enable ? "on" : "off", ret);
		return ret;
	}

	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set vf %d vlan spoof check %s failed, ret=%d\n",
			vf, enable ? "on" : "off", ret);

	return ret;
}
static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
				 bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 new_spoofchk = enable ? 1 : 0;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (vport->vf_info.spoofchk == new_spoofchk)
		return 0;

	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
		dev_warn(&hdev->pdev->dev,
			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
			 vf);
	else if (enable && hclge_is_umv_space_full(vport, true))
		dev_warn(&hdev->pdev->dev,
			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
			 vf);

	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
	if (ret)
		return ret;

	vport->vf_info.spoofchk = new_spoofchk;
	return 0;
}

static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	int i;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return 0;

	/* resume the vf spoof check state after reset */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
					       vport->vf_info.spoofchk);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 new_trusted = enable ? 1 : 0;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (vport->vf_info.trusted == new_trusted)
		return 0;

	vport->vf_info.trusted = new_trusted;
	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
	hclge_task_schedule(hdev, 0);

	return 0;
}
static void hclge_reset_vf_rate(struct hclge_dev *hdev)
{
	int ret;
	int vf;

	/* reset vf rate to default value */
	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
		struct hclge_vport *vport = &hdev->vport[vf];

		vport->vf_info.max_tx_rate = 0;
		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"vf%d failed to reset to default, ret=%d\n",
				vf - HCLGE_VF_VPORT_START_NUM, ret);
	}
}

static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
				     int min_tx_rate, int max_tx_rate)
{
	if (min_tx_rate != 0 ||
	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
		dev_err(&hdev->pdev->dev,
			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
		return -EINVAL;
	}

	return 0;
}

static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
			     int min_tx_rate, int max_tx_rate, bool force)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
	if (ret)
		return ret;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
		return 0;

	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
	if (ret)
		return ret;

	vport->vf_info.max_tx_rate = max_tx_rate;

	return 0;
}
static int hclge_resume_vf_rate(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport->nic;
	struct hclge_vport *vport;
	int ret;
	int vf;

	/* resume the vf max_tx_rate after reset */
	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
		vport = hclge_get_vf_vport(hdev, vf);
		if (!vport)
			return -EINVAL;

		/* zero means max rate, after reset, firmware already set it to
		 * max rate, so just continue.
		 */
		if (!vport->vf_info.max_tx_rate)
			continue;

		ret = hclge_set_vf_rate(handle, vf, 0,
					vport->vf_info.max_tx_rate, true);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vf%d failed to resume tx_rate:%u, ret=%d\n",
				vf, vport->vf_info.max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
		vport++;
	}
}
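/* Rebuild the hardware configuration after a reset. This mirrors the
 * relevant parts of hclge_init_ae_dev(), and additionally restores
 * per-VF state such as spoof checking and tx rate limits.
 */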
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	/* NOTE: pf reset needn't clear or restore pf and vf table entry,
	 * so here should not clean table in memory.
	 */
	if (hdev->reset_type == HNAE3_IMP_RESET ||
	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
		hclge_reset_umv_space(hdev);
	}

	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
				  true, hdev->reset_pending);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_tp_port_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
			ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to reinit manager table, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
		return ret;
	}

	ret = hclge_ptp_init(hdev);
	if (ret)
		return ret;

	/* Log and clear the hw errors those already occurred */
	if (hnae3_dev_ras_imp_supported(hdev))
		hclge_handle_occurred_error(hdev);
	else
		hclge_handle_all_hns_hw_errors(ae_dev);

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on global reset.
	 */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable NIC hw error interrupts\n",
			ret);
		return ret;
	}

	if (hdev->roce_client) {
		ret = hclge_config_rocee_ras_interrupt(hdev, true);
		if (ret) {
			dev_err(&pdev->dev,
				"fail(%d) to re-enable roce ras interrupts\n",
				ret);
			return ret;
		}
	}

	hclge_reset_vport_state(hdev);
	ret = hclge_reset_vport_spoofchk(hdev);
	if (ret)
		return ret;

	ret = hclge_resume_vf_rate(hdev);
	if (ret)
		return ret;

	hclge_init_rxd_adv_layout(hdev);

	ret = hclge_update_wol(hdev);
	if (ret)
		dev_warn(&pdev->dev,
			 "failed to update wol config, ret = %d\n", ret);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_reset_vf_rate(hdev);
	hclge_clear_vf_vlan(hdev);
	hclge_state_uninit(hdev);
	hclge_ptp_uninit(hdev);
	hclge_uninit_rxd_adv_layout(hdev);
	hclge_uninit_mac_table(hdev);
	hclge_del_all_fd_entries(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	/* Disable all hw interrupts */
	hclge_config_mac_tnl_int(hdev, false);
	hclge_config_nic_hw_error(hdev, false);
	hclge_config_rocee_ras_interrupt(hdev, false);

	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
	hclge_misc_irq_uninit(hdev);
	hclge_devlink_uninit(hdev);
	hclge_pci_uninit(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	mutex_destroy(&hdev->vport_lock);
	ae_dev->priv = NULL;
}
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->pf_rss_size_max;
}

static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	struct hclge_dev *hdev = vport->back;
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	unsigned int i;

	roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = vport->nic.kinfo.rss_size * i;
	}

	return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
					  tc_size);
}
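/* Change the number of enabled queue pairs (ethtool -L). The TM vport
 * mapping and RSS TC mode are updated first; the RSS indirection table is
 * rebuilt only when the user has not configured it explicitly.
 */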
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_set_rss_tc_mode_cfg(handle);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
			    GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);

	return ret;
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool gro_en_old = hdev->gro_en;
	int ret;

	hdev->gro_en = enable;
	ret = hclge_config_gro(hdev);
	if (ret)
		hdev->gro_en = gro_en_old;

	return ret;
}
static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport)
{
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	bool uc_en = false;
	bool mc_en = false;
	u8 tmp_flags;
	bool bc_en;
	int ret;

	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
		vport->last_promisc_flags = vport->overflow_promisc_flags;
	}

	if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
				&vport->state))
		return 0;

	/* for PF */
	if (!vport->vport_id) {
		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
					     tmp_flags & HNAE3_MPE);
		if (!ret)
			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
				&vport->state);
		else
			set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
				&vport->state);
		return ret;
	}

	/* for VF */
	if (vport->vf_info.trusted) {
		uc_en = vport->vf_info.request_uc_en > 0 ||
			vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE;
		mc_en = vport->vf_info.request_mc_en > 0 ||
			vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE;
	}
	bc_en = vport->vf_info.request_bc_en > 0;

	ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
					 mc_en, bc_en);
	if (ret) {
		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
		return ret;
	}
	hclge_set_vport_vlan_fltr_change(vport);

	return 0;
}

static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int ret;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];

		ret = hclge_sync_vport_promisc_mode(vport);
		if (ret)
			return;
	}
}
static bool hclge_module_existed(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u32 existed;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP exist state, ret = %d\n", ret);
		return false;
	}

	existed = le32_to_cpu(desc.data[0]);

	return existed != 0;
}

/* need 6 bds(total 140 bytes) in one reading
 * return the number of bytes actually read, 0 means read failed.
 */
static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
				     u32 len, u8 *data)
{
	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
	u16 read_len;
	u16 copy_len;
	int ret;
	int i;

	/* setup all 6 bds to read module eeprom info. */
	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
					   true);

		/* bd0~bd4 need next flag */
		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	}

	/* setup bd0, this bd contains offset and read length. */
	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
	sfp_info_bd0->read_len = cpu_to_le16(read_len);

	ret = hclge_cmd_send(&hdev->hw, desc, i);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP eeprom info, ret = %d\n", ret);
		return 0;
	}

	/* copy sfp info from bd0 to out buffer. */
	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
	memcpy(data, sfp_info_bd0->data, copy_len);
	read_len = copy_len;

	/* copy sfp info from bd1~bd5 to out buffer if needed. */
	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		if (read_len >= len)
			break;

		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
		memcpy(data + read_len, desc[i].data, copy_len);
		read_len += copy_len;
	}

	return read_len;
}
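/* Read the SFP module eeprom for ethtool -m, looping in chunks of at most
 * HCLGE_SFP_INFO_MAX_LEN bytes per firmware query.
 */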
static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
				   u32 len, u8 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 read_len = 0;
	u16 data_len;

	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return -EOPNOTSUPP;

	if (!hclge_module_existed(hdev))
		return -ENXIO;

	while (read_len < len) {
		data_len = hclge_get_sfp_eeprom_info(hdev,
						     offset + read_len,
						     len - read_len,
						     data + read_len);
		if (!data_len)
			return -EIO;

		read_len += data_len;
	}

	return 0;
}

static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
					 u32 *status_code)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query link diagnosis info, ret = %d\n", ret);
		return ret;
	}

	*status_code = le32_to_cpu(desc.data[0]);
	return 0;
}
/* After disabling sriov, the VF still has some config and info that needs
 * cleaning, which is configured by the PF.
 */
static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	int ret;

	clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->need_notify = 0;
	vport->mps = 0;

	/* after disable sriov, clean VF rate configured by PF */
	ret = hclge_tm_qs_shaper_cfg(vport, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clean vf%d rate config, ret = %d\n",
			vfid, ret);

	vlan_info.vlan_tag = 0;
	vlan_info.qos = 0;
	vlan_info.vlan_proto = ETH_P_8021Q;
	ret = hclge_update_port_base_vlan_cfg(vport,
					      HNAE3_PORT_BASE_VLAN_DISABLE,
					      &vlan_info);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clean vf%d port base vlan, ret = %d\n",
			vfid, ret);

	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clean vf%d spoof config, ret = %d\n",
			vfid, ret);

	memset(&vport->vf_info, 0, sizeof(vport->vf_info));
}

static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < num_vfs; i++) {
		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];

		hclge_clear_vport_vf_info(vport, i);
	}
}
static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode,
			       u8 *priority)
{
	struct hclge_vport *vport = hclge_get_vport(h);

	if (dscp >= HNAE3_MAX_DSCP)
		return -EINVAL;

	if (tc_mode)
		*tc_mode = vport->nic.kinfo.tc_map_mode;
	if (priority)
		*priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 :
			    vport->nic.kinfo.dscp_prio[dscp];

	return 0;
}
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.reset_prepare = hclge_reset_prepare_general,
	.reset_done = hclge_reset_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.request_update_promisc_mode = hclge_request_update_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec_stats = hclge_get_fec_stats,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_comm_get_rss_key_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_read_cmd = hclge_dbg_read_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.get_vf_config = hclge_get_vf_config,
	.set_vf_link_state = hclge_set_vf_link_state,
	.set_vf_spoofchk = hclge_set_vf_spoofchk,
	.set_vf_trust = hclge_set_vf_trust,
	.set_vf_rate = hclge_set_vf_rate,
	.set_vf_mac = hclge_set_vf_mac,
	.get_module_eeprom = hclge_get_module_eeprom,
	.get_cmdq_stat = hclge_get_cmdq_stat,
	.add_cls_flower = hclge_add_cls_flower,
	.del_cls_flower = hclge_del_cls_flower,
	.cls_flower_active = hclge_is_cls_flower_active,
	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
	.set_tx_hwts_info = hclge_ptp_set_tx_info,
	.get_rx_hwts = hclge_ptp_get_rx_hwts,
	.get_ts_info = hclge_ptp_get_ts_info,
	.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
	.clean_vf_config = hclge_clean_vport_config,
	.get_dscp_prio = hclge_get_dscp_prio,
	.get_wol = hclge_get_wol,
	.set_wol = hclge_set_wol,
};
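
/* Illustrative sketch, compiled out: hclge exposes all of its functionality
 * to the hnae3 framework through the function-pointer table above. The
 * self-contained program below (all names invented) shows the same ops-table
 * pattern in miniature: the caller holds only a pointer to the table, so a
 * backend can be swapped without touching the dispatch site.
 */
#if 0
#include <stdio.h>

struct demo_ops {
	int (*start)(void *priv);
	void (*stop)(void *priv);
};

static int demo_start(void *priv)
{
	printf("start %s\n", (const char *)priv);
	return 0;
}

static void demo_stop(void *priv)
{
	printf("stop %s\n", (const char *)priv);
}

static const struct demo_ops demo = {
	.start = demo_start,
	.stop = demo_stop,
};

int main(void)
{
	char name[] = "demo0";

	/* dispatch through the table, exactly as the framework does */
	if (!demo.start(name))
		demo.stop(name);
	return 0;
}
#endif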
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
static int __init hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
	if (!hclge_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}
static void __exit hclge_exit(void)
{
	hnae3_unregister_ae_algo_prepare(&ae_algo);
	hnae3_unregister_ae_algo(&ae_algo);
	destroy_workqueue(hclge_wq);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);
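
/* Illustrative sketch, compiled out (all names invented): the same lifecycle
 * ordering used by hclge_init()/hclge_exit() - allocate the workqueue before
 * registering with the framework, and unregister before destroying it, so no
 * path can queue work on a workqueue that no longer exists.
 */
#if 0
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "demo");
	if (!demo_wq)
		return -ENOMEM;
	/* register with the framework only once resources exist */
	return 0;
}

static void __exit demo_exit(void)
{
	/* unregister from the framework first, then tear down resources */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
#endif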