// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>

#include <net/rtnetlink.h>

#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_regs.h"
#include "hclge_err.h"
#include "hclge_devlink.h"
#include "hclge_comm_cmd.h"
#define HCLGE_NAME			"hclge"

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

#define HCLGE_LINK_STATUS_MS	10
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
static void hclge_update_fec_stats(struct hclge_dev *hdev);
static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
				      int wait_cnt);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"External Loopback test",
	"App      Loopback test",
	"Serdes   serial Loopback test",
	"Serdes   parallel Loopback test",
	"Phy      Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
	{"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
	{"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
	{"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
	{"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
	{"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
	{"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
	{"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
	{"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
	{"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
	{"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
	{"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
	{"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
	{"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
	{"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
	{"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
	{"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
	{"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
	{"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
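/* Each entry above records the minimum firmware statistics register count
 * (HCLGE_MAC_STATS_MAX_NUM_V1 or _V2) required for that counter to exist.
 * The helpers further below compare this against dev_specs.mac_stats_num,
 * so V2-only counters (the xoff_time ones) are skipped on older firmware.
 */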
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};
static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6 },
	{ TUNNEL_PACKET, 1 },
};
static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.src_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
	{ INNER_IP_TOS, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
	{ INNER_SRC_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.src_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
	{ INNER_DST_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.src_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.dst_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};
/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue; it sends the
 * descriptors to the queue and cleans the queue afterwards.
 **/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(&hw->hw, desc, num);
}
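/* Illustrative usage sketch (mirroring callers later in this file): a
 * single-descriptor read is typically
 *
 *	struct hclge_desc desc;
 *
 *	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
 *	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 *
 * where the third setup argument selects read (true) vs. write (false)
 * and the firmware returns its answer in desc.data[].
 */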
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	u32 data_size;
	int ret;
	u32 i;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	/* The first desc has a 64-bit header, so the data size needs to be
	 * reduced by 1.
	 */
	data_size = sizeof(desc) / (sizeof(u64)) - 1;

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* data memory is continuous because only the first desc has a
		 * header in this command
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	return 0;
}
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
{
#define HCLGE_REG_NUM_PER_DESC		4

	u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 data_size;
	u32 desc_num;
	int ret;
	u32 i;

	/* The first desc has a 64-bit header, so need to consider it */
	desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* data memory is continuous because only the first desc has a
		 * header in this command
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	kfree(desc);

	return 0;
}
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
{
	struct hclge_desc desc;
	int ret;

	/* Driver needs total register number of both valid registers and
	 * reserved registers, but the old firmware only returns number
	 * of valid registers in device V2. To be compatible with these
	 * devices, driver uses a fixed value.
	 */
	if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
		*reg_num = HCLGE_MAC_STATS_MAX_NUM_V1;
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query mac statistic reg number, ret = %d\n",
			ret);
		return ret;
	}

	*reg_num = le32_to_cpu(desc.data[0]);
	if (*reg_num == 0) {
		dev_err(&hdev->pdev->dev,
			"mac statistic reg number is invalid!\n");
		return -ENODATA;
	}

	return 0;
}
int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	/* The firmware supports the new statistics acquisition method */
	if (hdev->ae_dev->dev_specs.mac_stats_num)
		return hclge_mac_update_stats_complete(hdev);
	else
		return hclge_mac_update_stats_defective(hdev);
}
static int hclge_comm_get_count(struct hclge_dev *hdev,
				const struct hclge_comm_stats_str strs[],
				u32 size)
{
	int count = 0;
	u32 i;

	for (i = 0; i < size; i++)
		if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
			count++;

	return count;
}
static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++) {
		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
			continue;

		*buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
		buf++;
	}

	return buf;
}
static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
			continue;

		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	hclge_update_fec_stats(hdev);

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}
static void hclge_update_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
		HNAE3_SUPPORT_PHY_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \
		HNAE3_SUPPORT_EXTERNAL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode support
	 * serdes: all mac mode will support include GE/XGE/LGE/CGE
	 * phy: only support when phy device exist on board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 1;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		count += 1;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
		count += 1;
		handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK;

		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
		     hdev->hw.mac.phydev->drv->set_loopback) ||
		    hnae3_dev_phy_imp_supported(hdev)) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = hclge_comm_get_count(hdev, g_mac_stats_string,
					     ARRAY_SIZE(g_mac_stats_string)) +
			hclge_comm_tqps_get_sset_count(handle);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
					   size, p);
		p = hclge_comm_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(hdev, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_comm_tqps_get_stats(handle, p);
}
static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK	0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
			 le16_to_cpu(req->ext_tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			le16_to_cpu(req->pf_intr_vector_number_roce);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
	} else {
		hdev->num_msi = hdev->num_nic_msi;
	}

	return 0;
}
static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
	switch (speed_cmd) {
	case HCLGE_FW_MAC_SPEED_10M:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case HCLGE_FW_MAC_SPEED_100M:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case HCLGE_FW_MAC_SPEED_1G:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case HCLGE_FW_MAC_SPEED_10G:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case HCLGE_FW_MAC_SPEED_25G:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case HCLGE_FW_MAC_SPEED_40G:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case HCLGE_FW_MAC_SPEED_50G:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case HCLGE_FW_MAC_SPEED_100G:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	case HCLGE_FW_MAC_SPEED_200G:
		*speed = HCLGE_MAC_SPEED_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static const struct hclge_speed_bit_map speed_bit_map[] = {
	{HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
	{HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
	{HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
	{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
	{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
	{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
	{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
	{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
	{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
};
static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
{
	u16 i;

	for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
		if (speed == speed_bit_map[i].speed) {
			*speed_bit = speed_bit_map[i].speed_bit;
			return 0;
		}
	}

	return -EINVAL;
}
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;
	int ret;

	ret = hclge_get_speed_bit(speed, &speed_bit);
	if (ret)
		return ret;

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}
static void hclge_update_fec_support(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);

	if (mac->fec_ability & BIT(HNAE3_FEC_BASER))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
	if (mac->fec_ability & BIT(HNAE3_FEC_RS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
	if (mac->fec_ability & BIT(HNAE3_FEC_LLRS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 mac->supported);
	if (mac->fec_ability & BIT(HNAE3_FEC_NONE))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 mac->supported);
}
static void hclge_convert_setting_sr(u16 speed_ability,
				     unsigned long *link_mode)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
				 link_mode);
}
static void hclge_convert_setting_lr(u16 speed_ability,
				     unsigned long *link_mode)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(
			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
			link_mode);
}
static void hclge_convert_setting_cr(u16 speed_ability,
				     unsigned long *link_mode)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
				 link_mode);
}
static void hclge_convert_setting_kr(u16 speed_ability,
				     unsigned long *link_mode)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
				 link_mode);
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	/* If firmware has reported fec_ability, don't need to convert by speed */
	if (mac->fec_ability)
		goto out;

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) |
				   BIT(HNAE3_FEC_NONE);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
				   BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE);
		break;
	case HCLGE_MAC_SPEED_100G:
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
				   BIT(HNAE3_FEC_NONE);
		break;
	case HCLGE_MAC_SPEED_200G:
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
				   BIT(HNAE3_FEC_LLRS);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}

out:
	hclge_update_fec_support(mac);
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(speed_ability, mac->supported);
	hclge_convert_setting_lr(speed_ability, mac->supported);
	hclge_convert_setting_cr(speed_ability, mac->supported);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(speed_ability, mac->supported);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u16 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	if (hnae3_dev_pause_supported(hdev)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}
static u32 hclge_get_max_speed(u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		return HCLGE_MAC_SPEED_200G;

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define HCLGE_TX_SPARE_SIZE_UNIT		4096
#define SPEED_ABILITY_EXT_SHIFT			8

	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u16 speed_ability_ext;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
					       HCLGE_CFG_VLAN_FLTR_CAP_M,
					       HCLGE_CFG_VLAN_FLTR_CAP_S);

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power-of-2
	 * exponent rather than the value itself, which is more flexible for
	 * future changes and expansions. A PF field of 0 is not meaningful;
	 * in that case PF and VF share the same max rss size field,
	 * HCLGE_CFG_RSS_SIZE_S.
	 */
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;

	/* The unit of the tx spare buffer size queried from configuration
	 * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
	 * needed here.
	 */
	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}
/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len should be divided into 4-byte units when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM			8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
	ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
	ae_dev->dev_specs.tnl_num = 0;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
	ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
	ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
	ae_dev->dev_specs.tnl_num = req1->tnl_num;
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	if (!dev_specs->max_tm_rate)
		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
	if (!dev_specs->max_qset_num)
		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
	if (!dev_specs->umv_size)
		dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
{
	u32 reg_num = 0;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &reg_num);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
	return 0;
}
static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	ret = hclge_query_mac_stats_num(hdev);
	if (ret)
		return ret;

	/* set default specifications as devices lower than version V3 do not
	 * support querying specifications from firmware.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclge_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	}
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclge_parse_dev_specs(hdev, desc);
	hclge_check_dev_specs(hdev);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals to the number of vports */
	hdev->num_tqps = hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
static void hclge_init_tc_config(struct hclge_dev *hdev)
{
	unsigned int i;

	if (hdev->tc_max > HNAE3_MAX_TC ||
	    hdev->tc_max < 1) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Currently not support non-contiguous tc */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_cfg cfg;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	if (cfg.umv_space)
		hdev->wanted_umv_size = cfg.umv_space;
	else
		hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
	hdev->gro_en = true;
	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	hclge_init_tc_config(hdev);
	hclge_init_kdump_kernel_config(hdev);

	return 0;
}
static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_config_gro(struct hclge_dev *hdev)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = hdev->gro_en ? 1 : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_comm_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_comm_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGE_TQP_MAX_SIZE_DEV_V2
		 */
		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 i * HCLGE_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 HCLGE_TQP_EXT_REG_OFFSET +
					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
					 HCLGE_TQP_REG_SIZE;

		/* when device supports tx push and has device memory,
		 * the queue can execute push mode or doorbell mode on
		 * device memory.
		 */
		if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
			tqp->q.mem_base = hdev->hw.hw.mem_base +
					  HCLGE_TQP_MEM_OFFSET(hdev, i);

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure one to one mapping between irq and queue at default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)

{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_comm_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->kinfo.io_base = hdev->hw.hw.io_base;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->port_base_vlan_cfg.tbl_sta = true;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		vport->req_vlan_fltr_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);
		spin_lock_init(&vport->mac_list_lock);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is unit by 128 byte */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}
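
/* The helpers below total up the pieces of the packet buffer: the per-TC
 * TX private buffers, the per-TC RX private buffers and the shared RX
 * buffer.  The calculators use these totals to test whether a candidate
 * layout still fits in hdev->pkt_buf_size.
 */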
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	u32 i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}
/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}
static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
				  / BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP		0x1800

	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 half_mps = hdev->mps >> 1;
	u32 min_rx_priv;
	unsigned int i;

	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
			COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - hdev->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}
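
/* RX buffer sizing falls back through progressively smaller layouts:
 * private buffers only, maximum waterlines for every TC, reduced
 * waterlines, then dropping private buffers first for non-PFC TCs and
 * finally for PFC TCs.  The first layout that fits is used.
 */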
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculate successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
		return 0;

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}
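
/* Waterline and threshold configuration covers HCLGE_MAX_TC_NUM TCs with
 * two command descriptors (HCLGE_TC_NUM_ONE_DESC TCs per descriptor),
 * chained by setting the NEXT flag on the first one.
 */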
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptor at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}
static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}
static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}
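
/* MSI-X layout: the NIC owns the first num_nic_msi vectors, so the RoCE
 * client's vectors start at hdev->num_nic_msi.  The check below rejects
 * configurations where both clients together would exceed num_msi.
 */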
static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->num_nic_msi;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
	roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
					hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
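
/* Only 10M and 100M links can run half duplex; every faster speed is
 * forced to full duplex before being programmed into the firmware.
 */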
static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = {
	{HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M},
	{HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M},
	{HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G},
	{HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G},
	{HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G},
	{HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G},
	{HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G},
	{HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G},
	{HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G},
};

static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
{
	u16 i;

	for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) {
		if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) {
			*speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw;
			return 0;
		}
	}

	return -EINVAL;
}
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex, u8 lane_num)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	u32 speed_fw;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	if (duplex)
		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);

	ret = hclge_convert_to_fw_speed(speed, &speed_fw);
	if (ret) {
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return ret;
	}

	hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S,
			speed_fw);
	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);
	req->lane_num = lane_num;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	duplex = hclge_check_speed_dup(duplex, speed);
	if (!mac->support_autoneg && mac->speed == speed &&
	    mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
		return 0;

	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num);
	if (ret)
		return ret;

	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
	if (lane_num)
		hdev->hw.mac.lane_num = lane_num;

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex, u8 lane_num)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	if (enable)
		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.support_autoneg) {
		if (enable) {
			dev_err(&hdev->pdev->dev,
				"autoneg is not supported by current port\n");
			return -EOPNOTSUPP;
		} else {
			return 0;
		}
	}

	return hclge_set_autoneg_en(hdev, enable);
}

static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}
static int hclge_restart_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;
	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
		return hclge_set_autoneg_en(hdev, !halt);

	return 0;
}
static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev,
					struct hclge_desc *desc, u32 desc_len)
{
	u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2;
	u32 desc_index = 0;
	u32 data_index = 0;
	u32 i;

	for (i = 0; i < lane_size; i++) {
		if (data_index >= HCLGE_DESC_DATA_LEN) {
			desc_index++;
			data_index = 0;
		}

		if (desc_index >= desc_len)
			return;

		hdev->fec_stats.per_lanes[i] +=
			le32_to_cpu(desc[desc_index].data[data_index]);
		data_index++;
	}
}

static void hclge_parse_fec_stats(struct hclge_dev *hdev,
				  struct hclge_desc *desc, u32 desc_len)
{
	struct hclge_query_fec_stats_cmd *req;

	req = (struct hclge_query_fec_stats_cmd *)desc[0].data;

	hdev->fec_stats.base_r_lane_num = req->base_r_lane_num;
	hdev->fec_stats.rs_corr_blocks +=
		le32_to_cpu(req->rs_fec_corr_blocks);
	hdev->fec_stats.rs_uncorr_blocks +=
		le32_to_cpu(req->rs_fec_uncorr_blocks);
	hdev->fec_stats.rs_error_blocks +=
		le32_to_cpu(req->rs_fec_error_blocks);
	hdev->fec_stats.base_r_corr_blocks +=
		le32_to_cpu(req->base_r_fec_corr_blocks);
	hdev->fec_stats.base_r_uncorr_blocks +=
		le32_to_cpu(req->base_r_fec_uncorr_blocks);

	hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1);
}
static int hclge_update_fec_stats_hw(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM];
	int ret;
	u32 i;

	for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS,
					   true);
		if (i != (HCLGE_FEC_STATS_CMD_NUM - 1))
			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM);
	if (ret)
		return ret;

	hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM);

	return 0;
}

static void hclge_update_fec_stats(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

	if (!hnae3_ae_dev_fec_stats_supported(ae_dev) ||
	    test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state))
		return;

	ret = hclge_update_fec_stats_hw(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to update fec stats, ret = %d\n", ret);

	clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state);
}
static void hclge_get_fec_stats_total(struct hclge_dev *hdev,
				      struct ethtool_fec_stats *fec_stats)
{
	fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks;
	fec_stats->uncorrectable_blocks.total =
		hdev->fec_stats.rs_uncorr_blocks;
}

static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev,
				      struct ethtool_fec_stats *fec_stats)
{
	u32 i;

	if (hdev->fec_stats.base_r_lane_num == 0 ||
	    hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) {
		dev_err(&hdev->pdev->dev,
			"fec stats lane number(%llu) is invalid\n",
			hdev->fec_stats.base_r_lane_num);
		return;
	}

	for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) {
		fec_stats->corrected_blocks.lanes[i] =
			hdev->fec_stats.base_r_corr_per_lanes[i];
		fec_stats->uncorrectable_blocks.lanes[i] =
			hdev->fec_stats.base_r_uncorr_per_lanes[i];
	}
}
static void hclge_comm_get_fec_stats(struct hclge_dev *hdev,
				     struct ethtool_fec_stats *fec_stats)
{
	u32 fec_mode = hdev->hw.mac.fec_mode;

	switch (fec_mode) {
	case BIT(HNAE3_FEC_RS):
	case BIT(HNAE3_FEC_LLRS):
		hclge_get_fec_stats_total(hdev, fec_stats);
		break;
	case BIT(HNAE3_FEC_BASER):
		hclge_get_fec_stats_lanes(hdev, fec_stats);
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"fec stats is not supported by current fec mode(0x%x)\n",
			fec_mode);
		break;
	}
}

static void hclge_get_fec_stats(struct hnae3_handle *handle,
				struct ethtool_fec_stats *fec_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 fec_mode = hdev->hw.mac.fec_mode;

	if (fec_mode == BIT(HNAE3_FEC_NONE) ||
	    fec_mode == BIT(HNAE3_FEC_AUTO) ||
	    fec_mode == BIT(HNAE3_FEC_USER_DEF))
		return;

	hclge_update_fec_stats(hdev);

	hclge_comm_get_fec_stats(hdev, fec_stats);
}
static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
{
	struct hclge_config_fec_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);

	req = (struct hclge_config_fec_cmd *)desc.data;
	if (fec_mode & BIT(HNAE3_FEC_AUTO))
		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
	if (fec_mode & BIT(HNAE3_FEC_RS))
		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
	if (fec_mode & BIT(HNAE3_FEC_LLRS))
		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS);
	if (fec_mode & BIT(HNAE3_FEC_BASER))
		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);

	return ret;
}
static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	if (fec_mode && !(mac->fec_ability & fec_mode)) {
		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
		return -EINVAL;
	}

	ret = hclge_set_fec_hw(hdev, fec_mode);
	if (ret)
		return ret;

	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
	return 0;
}

static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
			  u8 *fec_mode)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac *mac = &hdev->hw.mac;

	if (fec_ability)
		*fec_ability = mac->fec_ability;
	if (fec_mode)
		*fec_mode = mac->fec_mode;
}
static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	hdev->support_sfp_query = true;
	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
					 hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
	if (ret)
		return ret;

	if (hdev->hw.mac.support_autoneg) {
		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
		if (ret)
			return ret;
	}

	mac->link = 0;

	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
		if (ret)
			return ret;
	}

	ret = hclge_set_mac_mtu(hdev, hdev->mps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
		return ret;
	}

	ret = hclge_set_default_loopback(hdev);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"allocate buffer fail, ret=%d\n", ret);

	return ret;
}
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) {
		hdev->last_mbx_scheduled = jiffies;
		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
	}
}

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) {
		hdev->last_rst_scheduled = jiffies;
		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
	}
}

static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}

void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
		mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
}
static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;

	return 0;
}

static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	*link_status = HCLGE_LINK_STATUS_DOWN;

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
		return 0;

	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
		return 0;

	return hclge_get_mac_link_status(hdev, link_status);
}
static void hclge_push_link_status(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int ret;
	u16 i;

	for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];

		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
		    vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
			continue;

		ret = hclge_push_vf_link_status(vport);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to push link status to vf%u, ret = %d\n",
				i, ret);
		}
	}
}
static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_handle *rhandle = &hdev->vport[0].roce;
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hnae3_client *rclient = hdev->roce_client;
	struct hnae3_client *client = hdev->nic_client;
	int state;
	int ret;

	if (!client)
		return;

	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
		return;

	ret = hclge_get_mac_phy_link(hdev, &state);
	if (ret) {
		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
		return;
	}

	if (state != hdev->hw.mac.link) {
		hdev->hw.mac.link = state;
		client->ops->link_status_change(handle, state);
		hclge_config_mac_tnl_int(hdev, state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, state);

		hclge_push_link_status(hdev);
	}

	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
}
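
/* When autoneg is unavailable the advertising mask cannot be negotiated,
 * so it is rebuilt from scratch: speed bits derived from the module
 * type, a single FEC bit from the active FEC mode, and pause bits from
 * the last flow-control mode.
 */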
static void hclge_update_speed_advertising(struct hclge_mac *mac)
{
	u32 speed_ability;

	if (hclge_get_speed_bit(mac->speed, &speed_ability))
		return;

	switch (mac->module_type) {
	case HNAE3_MODULE_TYPE_FIBRE_LR:
		hclge_convert_setting_lr(speed_ability, mac->advertising);
		break;
	case HNAE3_MODULE_TYPE_FIBRE_SR:
	case HNAE3_MODULE_TYPE_AOC:
		hclge_convert_setting_sr(speed_ability, mac->advertising);
		break;
	case HNAE3_MODULE_TYPE_CR:
		hclge_convert_setting_cr(speed_ability, mac->advertising);
		break;
	case HNAE3_MODULE_TYPE_KR:
		hclge_convert_setting_kr(speed_ability, mac->advertising);
		break;
	default:
		break;
	}
}
static void hclge_update_fec_advertising(struct hclge_mac *mac)
{
	if (mac->fec_mode & BIT(HNAE3_FEC_RS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->advertising);
	else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 mac->advertising);
	else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->advertising);
	else
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 mac->advertising);
}

static void hclge_update_pause_advertising(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	bool rx_en, tx_en;

	switch (hdev->fc_mode_last_time) {
	case HCLGE_FC_RX_PAUSE:
		rx_en = true;
		tx_en = false;
		break;
	case HCLGE_FC_TX_PAUSE:
		rx_en = false;
		tx_en = true;
		break;
	case HCLGE_FC_FULL:
		rx_en = true;
		tx_en = true;
		break;
	default:
		rx_en = false;
		tx_en = false;
		break;
	}

	linkmode_set_pause(mac->advertising, tx_en, rx_en);
}
static void hclge_update_advertising(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	linkmode_zero(mac->advertising);
	hclge_update_speed_advertising(mac);
	hclge_update_fec_advertising(mac);
	hclge_update_pause_advertising(hdev);
}

static void hclge_update_port_capability(struct hclge_dev *hdev,
					 struct hclge_mac *mac)
{
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	/* firmware can not identify back plane type, the media type
	 * read from configuration can help deal it
	 */
	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
		mac->module_type = HNAE3_MODULE_TYPE_KR;
	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
		mac->module_type = HNAE3_MODULE_TYPE_TP;

	if (mac->support_autoneg) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
		linkmode_copy(mac->advertising, mac->supported);
	} else {
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
				   mac->supported);
		hclge_update_advertising(hdev);
	}
}
static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
{
	struct hclge_sfp_info_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
	resp = (struct hclge_sfp_info_cmd *)desc.data;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP do not support get SFP speed %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
		return ret;
	}

	*speed = le32_to_cpu(resp->speed);

	return 0;
}
*hdev
, struct hclge_mac
*mac
)
3186 struct hclge_sfp_info_cmd
*resp
;
3187 struct hclge_desc desc
;
3190 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_GET_SFP_INFO
, true);
3191 resp
= (struct hclge_sfp_info_cmd
*)desc
.data
;
3193 resp
->query_type
= QUERY_ACTIVE_SPEED
;
3195 ret
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
3196 if (ret
== -EOPNOTSUPP
) {
3197 dev_warn(&hdev
->pdev
->dev
,
3198 "IMP does not support get SFP info %d\n", ret
);
3201 dev_err(&hdev
->pdev
->dev
, "get sfp info failed %d\n", ret
);
3205 /* In some case, mac speed get from IMP may be 0, it shouldn't be
3206 * set to mac->speed.
3208 if (!le32_to_cpu(resp
->speed
))
3211 mac
->speed
= le32_to_cpu(resp
->speed
);
3212 /* if resp->speed_ability is 0, it means it's an old version
3213 * firmware, do not update these params
3215 if (resp
->speed_ability
) {
3216 mac
->module_type
= le32_to_cpu(resp
->module_type
);
3217 mac
->speed_ability
= le32_to_cpu(resp
->speed_ability
);
3218 mac
->autoneg
= resp
->autoneg
;
3219 mac
->support_autoneg
= resp
->autoneg_ability
;
3220 mac
->speed_type
= QUERY_ACTIVE_SPEED
;
3221 mac
->lane_num
= resp
->lane_num
;
3222 if (!resp
->active_fec
)
3225 mac
->fec_mode
= BIT(resp
->active_fec
);
3226 mac
->fec_ability
= resp
->fec_ability
;
3228 mac
->speed_type
= QUERY_SFP_SPEED
;
static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
					struct ethtool_link_ksettings *cmd)
{
	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_phy_link_ksetting_0_cmd *req0;
	struct hclge_phy_link_ksetting_1_cmd *req1;
	u32 supported, advertising, lp_advertising;
	struct hclge_dev *hdev = vport->back;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
				   true);
	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
				   true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get phy link ksetting, ret = %d.\n", ret);
		return ret;
	}

	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
	cmd->base.autoneg = req0->autoneg;
	cmd->base.speed = le32_to_cpu(req0->speed);
	cmd->base.duplex = req0->duplex;
	cmd->base.port = req0->port;
	cmd->base.transceiver = req0->transceiver;
	cmd->base.phy_address = req0->phy_address;
	cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
	cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
	supported = le32_to_cpu(req0->supported);
	advertising = le32_to_cpu(req0->advertising);
	lp_advertising = le32_to_cpu(req0->lp_advertising);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
						lp_advertising);

	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
	cmd->base.master_slave_cfg = req1->master_slave_cfg;
	cmd->base.master_slave_state = req1->master_slave_state;

	return 0;
}
static int
hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
			     const struct ethtool_link_ksettings *cmd)
{
	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_phy_link_ksetting_0_cmd *req0;
	struct hclge_phy_link_ksetting_1_cmd *req1;
	struct hclge_dev *hdev = vport->back;
	u32 advertising;
	int ret;

	if (cmd->base.autoneg == AUTONEG_DISABLE &&
	    ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
	     (cmd->base.duplex != DUPLEX_HALF &&
	      cmd->base.duplex != DUPLEX_FULL)))
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
				   false);
	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
				   false);

	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
	req0->autoneg = cmd->base.autoneg;
	req0->speed = cpu_to_le32(cmd->base.speed);
	req0->duplex = cmd->base.duplex;
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);
	req0->advertising = cpu_to_le32(advertising);
	req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;

	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
	req1->master_slave_cfg = cmd->base.master_slave_cfg;

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to set phy link ksettings, ret = %d.\n", ret);
		return ret;
	}

	hdev->hw.mac.autoneg = cmd->base.autoneg;
	hdev->hw.mac.speed = cmd->base.speed;
	hdev->hw.mac.duplex = cmd->base.duplex;
	linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);

	return 0;
}
static int hclge_update_tp_port_info(struct hclge_dev *hdev)
{
	struct ethtool_link_ksettings cmd;
	int ret;

	if (!hnae3_dev_phy_imp_supported(hdev))
		return 0;

	ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
	if (ret)
		return ret;

	hdev->hw.mac.autoneg = cmd.base.autoneg;
	hdev->hw.mac.speed = cmd.base.speed;
	hdev->hw.mac.duplex = cmd.base.duplex;
	linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising);

	return 0;
}

static int hclge_tp_port_init(struct hclge_dev *hdev)
{
	struct ethtool_link_ksettings cmd;

	if (!hnae3_dev_phy_imp_supported(hdev))
		return 0;

	cmd.base.autoneg = hdev->hw.mac.autoneg;
	cmd.base.speed = hdev->hw.mac.speed;
	cmd.base.duplex = hdev->hw.mac.duplex;
	linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);

	return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
}
static int hclge_update_port_info(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int speed;
	int ret;

	/* get the port info from SFP cmd if not copper port */
	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
		return hclge_update_tp_port_info(hdev);

	/* if IMP does not support get SFP/qSFP info, return directly */
	if (!hdev->support_sfp_query)
		return 0;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		speed = mac->speed;
		ret = hclge_get_sfp_info(hdev, mac);
	} else {
		speed = HCLGE_MAC_SPEED_UNKNOWN;
		ret = hclge_get_sfp_speed(hdev, &speed);
	}

	if (ret == -EOPNOTSUPP) {
		hdev->support_sfp_query = false;
		return ret;
	} else if (ret) {
		return ret;
	}

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
			hclge_update_port_capability(hdev, mac);
			if (mac->speed != speed)
				(void)hclge_tm_port_shaper_cfg(hdev);
			return 0;
		}
		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
					       HCLGE_MAC_FULL, mac->lane_num);
	}

	if (speed == HCLGE_MAC_SPEED_UNKNOWN)
		return 0; /* do nothing if no SFP */

	/* must config full duplex for SFP */
	return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0);
}
static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}

static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
{
	if (!pci_num_vf(hdev->pdev)) {
		dev_err(&hdev->pdev->dev,
			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
		return NULL;
	}

	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
		dev_err(&hdev->pdev->dev,
			"vf id(%d) is out of range(0 <= vfid < %d)\n",
			vf, pci_num_vf(hdev->pdev));
		return NULL;
	}

	/* VF start from 1 in vport */
	vf += HCLGE_VF_VPORT_START_NUM;
	return &hdev->vport[vf];
}
static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
			       struct ifla_vf_info *ivf)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	ivf->vf = vf;
	ivf->linkstate = vport->vf_info.link_state;
	ivf->spoofchk = vport->vf_info.spoofchk;
	ivf->trusted = vport->vf_info.trusted;
	ivf->min_tx_rate = 0;
	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
	ether_addr_copy(ivf->mac, vport->vf_info.mac);

	return 0;
}

static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
				   int link_state)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int link_state_old;
	int ret;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	link_state_old = vport->vf_info.link_state;
	vport->vf_info.link_state = link_state;

	/* return success directly if the VF is unalive, VF will
	 * query link state itself when it starts work.
	 */
	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
		return 0;

	ret = hclge_push_vf_link_status(vport);
	if (ret) {
		vport->vf_info.link_state = link_state_old;
		dev_err(&hdev->pdev->dev,
			"failed to push vf%d link status, ret = %d\n", vf, ret);
	}

	return ret;
}
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	hw_err_src_reg = hclge_read_dev(&hdev->hw,
					HCLGE_RAS_PF_OTHER_INT_STS_REG);

	/* Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only process reset event in this go and will
	 * defer the processing of the mailbox events. Since, we would have not
	 * cleared RX CMDQ event this time we would receive again another
	 * interrupt from H/W just for the mailbox.
	 *
	 * check for vector0 reset event sources
	 */
	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		hdev->rst_stats.imp_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		hdev->rst_stats.global_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* check for vector0 msix event and hardware error event source */
	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
	    hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
		return HCLGE_VECTOR0_EVENT_ERR;

	/* check for vector0 ptp event source */
	if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
		*clearval = msix_src_reg;
		return HCLGE_VECTOR0_EVENT_PTP;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

	/* print other vector0 event source */
	dev_info(&hdev->pdev->dev,
		 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
		 cmdq_src_reg, hw_err_src_reg, msix_src_reg);

	return HCLGE_VECTOR0_EVENT_OTHER;
}
static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_PTP:
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}

static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
{
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	unsigned long flags;
	u32 clearval = 0;
	u32 event_cause;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events. */
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_ERR:
		hclge_errhand_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_reset_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_PTP:
		spin_lock_irqsave(&hdev->ptp->lock, flags);
		hclge_ptp_clean_tx_hwts(hdev);
		spin_unlock_irqrestore(&hdev->ptp->lock, flags);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule mbx task as there are more
		 * mbx messages reported by this interrupt.
		 */
		hclge_mbx_task_schedule(hdev);
		break;
	default:
		dev_warn(&hdev->pdev->dev,
			 "received unknown or unhandled event of vector0\n");
		break;
	}

	hclge_clear_event_cause(hdev, event_cause, clearval);

	/* Enable interrupt if it is not caused by reset event or error event */
	if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
	    event_cause == HCLGE_VECTOR0_EVENT_MBX ||
	    event_cause == HCLGE_VECTOR0_EVENT_OTHER)
		hclge_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}
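
/* Note that vector 0 stays masked on the reset and error paths above;
 * those paths re-enable it later from their task context once handling
 * completes, which avoids an interrupt storm while the event is still
 * pending.
 */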
static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}
static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	/* this would be explicitly freed in the end */
	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
		 HCLGE_NAME, pci_name(hdev->pdev));
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, hdev->misc_vector.name, hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}

static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
{
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclge_free_vector(hdev, 0);
}
int hclge_notify_client(struct hclge_dev *hdev,
			enum hnae3_reset_notify_type type)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hnae3_client *client = hdev->nic_client;
	int ret;

	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static int hclge_notify_roce_client(struct hclge_dev *hdev,
				    enum hnae3_reset_notify_type type)
{
	struct hnae3_handle *handle = &hdev->vport[0].roce;
	struct hnae3_client *client = hdev->roce_client;
	int ret;

	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
			type, ret);

	return ret;
}
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS	100
#define HCLGE_RESET_WAIT_CNT	350

	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_IMP_RESET_BIT;
		break;
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WATI_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}
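
/* With the constants above the reset wait polls every 100 ms up to 350
 * times, i.e. a worst-case wait of about 35 seconds before declaring a
 * reset timeout.
 */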
static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
{
	struct hclge_vf_rst_cmd *req;
	struct hclge_desc desc;

	req = (struct hclge_vf_rst_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
	req->dest_vfid = func_id;

	if (reset)
		req->vf_rst = 0x1;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
{
	int i;

	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to set/clear VF's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"set vf(%u) rst failed %d!\n",
				vport->vport_id - HCLGE_VF_VPORT_START_NUM,
				ret);
			return ret;
		}

		if (!reset ||
		    !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state))
			continue;

		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) &&
		    hdev->reset_type == HNAE3_FUNC_RESET) {
			set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET,
				&vport->need_notify);
			continue;
		}

		/* Inform VF to process the reset.
		 * hclge_inform_reset_assert_to_vf may fail if VF
		 * driver is not loaded.
		 */
		ret = hclge_inform_reset_assert_to_vf(vport);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "inform reset to vf(%u) failed %d!\n",
				 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
				 ret);
	}

	return 0;
}
static void hclge_mailbox_service_task(struct hclge_dev *hdev)
{
	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
	    test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) ||
	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
		return;

	if (time_is_before_jiffies(hdev->last_mbx_scheduled +
				   HCLGE_MBX_SCHED_TIMEOUT))
		dev_warn(&hdev->pdev->dev,
			 "mbx service task is scheduled after %ums on cpu%u!\n",
			 jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled),
			 smp_processor_id());

	hclge_mbx_handler(hdev);

	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
{
	struct hclge_pf_rst_sync_cmd *req;
	struct hclge_desc desc;
	int cnt = 0;
	int ret;

	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);

	do {
		/* vf need to down netdev by mbx during PF or FLR reset */
		hclge_mailbox_service_task(hdev);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		/* for compatible with old firmware, wait
		 * 100 ms for VF to stop IO
		 */
		if (ret == -EOPNOTSUPP) {
			msleep(HCLGE_RESET_SYNC_TIME);
			return;
		} else if (ret) {
			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
				 ret);
			return;
		} else if (req->all_vf_ready) {
			return;
		}
		msleep(HCLGE_PF_RESET_SYNC_TIME);
		hclge_comm_cmd_reuse_desc(&desc, true);
	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);

	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
}
void hclge_report_hw_error(struct hclge_dev *hdev,
			   enum hnae3_hw_error_type type)
{
	struct hnae3_client *client = hdev->nic_client;

	if (!client || !client->ops->process_hw_error ||
	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
		return;

	client->ops->process_hw_error(&hdev->vport[0].nic, type);
}

static void hclge_handle_imp_error(struct hclge_dev *hdev)
{
	u32 reg_val;

	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
	}

	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
	}
}
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status =%d\n", ret);

	return ret;
}
static void hclge_do_reset(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	if (hclge_get_hw_reset_stat(handle)) {
		dev_info(&pdev->dev, "hardware reset not finish\n");
		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
		return;
	}

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		dev_info(&pdev->dev, "IMP reset requested\n");
		val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
		hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
		break;
	case HNAE3_GLOBAL_RESET:
		dev_info(&pdev->dev, "global reset requested\n");
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF reset requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}
3985 static enum hnae3_reset_type
hclge_get_reset_level(struct hnae3_ae_dev
*ae_dev
,
3986 unsigned long *addr
)
3988 enum hnae3_reset_type rst_level
= HNAE3_NONE_RESET
;
3989 struct hclge_dev
*hdev
= ae_dev
->priv
;
3991 /* return the highest priority reset level amongst all */
3992 if (test_bit(HNAE3_IMP_RESET
, addr
)) {
3993 rst_level
= HNAE3_IMP_RESET
;
3994 clear_bit(HNAE3_IMP_RESET
, addr
);
3995 clear_bit(HNAE3_GLOBAL_RESET
, addr
);
3996 clear_bit(HNAE3_FUNC_RESET
, addr
);
3997 } else if (test_bit(HNAE3_GLOBAL_RESET
, addr
)) {
3998 rst_level
= HNAE3_GLOBAL_RESET
;
3999 clear_bit(HNAE3_GLOBAL_RESET
, addr
);
4000 clear_bit(HNAE3_FUNC_RESET
, addr
);
4001 } else if (test_bit(HNAE3_FUNC_RESET
, addr
)) {
4002 rst_level
= HNAE3_FUNC_RESET
;
4003 clear_bit(HNAE3_FUNC_RESET
, addr
);
4004 } else if (test_bit(HNAE3_FLR_RESET
, addr
)) {
4005 rst_level
= HNAE3_FLR_RESET
;
4006 clear_bit(HNAE3_FLR_RESET
, addr
);
4009 if (hdev
->reset_type
!= HNAE3_NONE_RESET
&&
4010 rst_level
< hdev
->reset_type
)
4011 return HNAE3_NONE_RESET
;
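
/* Reset levels are consumed in strict priority order:
 * IMP > GLOBAL > FUNC > FLR. Taking a higher-level request also clears
 * the lower-level bits in the same bitmap, and any request lower than
 * the reset already in progress is reported as HNAE3_NONE_RESET, so an
 * ongoing reset is never downgraded.
 */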
static void hclge_clear_reset_cause(struct hclge_dev *hdev)
{
	u32 clearval = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		break;
	case HNAE3_GLOBAL_RESET:
		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		break;
	default:
		break;
	}

	if (!clearval)
		return;

	/* For revision 0x20, the reset interrupt source
	 * can only be cleared after hardware reset done
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
				clearval);

	hclge_enable_vector(&hdev->misc_vector, true);
}

static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGE_COMM_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY;

	hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
}
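
/* The HCLGE_COMM_NIC_SW_RST_RDY bit in the CSQ depth register acts as a
 * driver/firmware handshake: it is set once the driver has finished its
 * reset preparation, and cleared again after re-initialization is done.
 */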
static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_set_all_vf_rst(hdev, true);
	if (ret)
		return ret;

	hclge_func_reset_sync_vf(hdev);

	return 0;
}

static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
	u32 reg_val;
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		ret = hclge_func_reset_notify_vf(hdev);
		if (ret)
			return ret;

		ret = hclge_func_reset_cmd(hdev, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"asserting function reset fail %d!\n", ret);
			return ret;
		}

		/* After performing pf reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hclge_comm_cmd_init is called.
		 */
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
		hdev->rst_stats.pf_rst_cnt++;
		break;
	case HNAE3_FLR_RESET:
		ret = hclge_func_reset_notify_vf(hdev);
		if (ret)
			return ret;
		break;
	case HNAE3_IMP_RESET:
		hclge_handle_imp_error(hdev);
		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
		break;
	default:
		break;
	}

	/* inform hardware that preparatory work is done */
	msleep(HCLGE_RESET_SYNC_TIME);
	hclge_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare wait ok\n");

	return ret;
}

static void hclge_show_rst_info(struct hclge_dev *hdev)
{
	char *buf;

	buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
	if (!buf)
		return;

	hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);

	dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);

	kfree(buf);
}

static bool hclge_reset_err_handle(struct hclge_dev *hdev)
{
#define MAX_RESET_FAIL_CNT 5

	if (hdev->reset_pending) {
		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
			 hdev->reset_pending);
		return true;
	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
		   HCLGE_RESET_INT_M) {
		dev_info(&hdev->pdev->dev,
			 "reset failed because new reset interrupt\n");
		hclge_clear_reset_cause(hdev);
		return false;
	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
		hdev->rst_stats.reset_fail_cnt++;
		set_bit(hdev->reset_type, &hdev->reset_pending);
		dev_info(&hdev->pdev->dev,
			 "re-schedule reset task(%u)\n",
			 hdev->rst_stats.reset_fail_cnt);
		return true;
	}

	hclge_clear_reset_cause(hdev);

	/* recover the handshake status when reset fail */
	hclge_reset_handshake(hdev, true);

	dev_err(&hdev->pdev->dev, "Reset fail!\n");

	hclge_show_rst_info(hdev);

	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);

	return false;
}

static void hclge_update_reset_level(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	enum hnae3_reset_type reset_level;

	/* reset request will not be set during reset, so clear
	 * pending reset request to avoid unnecessary reset
	 * caused by the same reason.
	 */
	hclge_get_reset_level(ae_dev, &hdev->reset_request);

	/* if default_reset_request has a higher level reset request,
	 * it should be handled as soon as possible, since some errors
	 * need this kind of reset to fix.
	 */
	reset_level = hclge_get_reset_level(ae_dev,
					    &hdev->default_reset_request);
	if (reset_level != HNAE3_NONE_RESET)
		set_bit(reset_level, &hdev->reset_request);
}

static int hclge_set_rst_done(struct hclge_dev *hdev)
{
	struct hclge_pf_rst_done_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_pf_rst_done_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	/* To be compatible with the old firmware, which does not support
	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
	 * return success
	 */
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "current firmware does not support command(0x%x)!\n",
			 HCLGE_OPC_PF_RST_DONE);
		return 0;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
			ret);
	}

	return ret;
}

static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, false);
		break;
	case HNAE3_GLOBAL_RESET:
	case HNAE3_IMP_RESET:
		ret = hclge_set_rst_done(hdev);
		break;
	default:
		break;
	}

	/* clear up the handshake status after re-initialize done */
	hclge_reset_handshake(hdev, false);

	return ret;
}

static int hclge_reset_stack(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	ret = hclge_reset_ae_dev(hdev->ae_dev);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
}

static int hclge_reset_prepare(struct hclge_dev *hdev)
{
	int ret;

	hdev->rst_stats.reset_cnt++;
	/* perform reset of the stack & ae device for a client */
	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();
	if (ret)
		return ret;

	return hclge_reset_prepare_wait(hdev);
}

static int hclge_reset_rebuild(struct hclge_dev *hdev)
{
	int ret;

	hdev->rst_stats.hw_reset_done_cnt++;

	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	rtnl_lock();
	ret = hclge_reset_stack(hdev);
	rtnl_unlock();
	if (ret)
		return ret;

	hclge_clear_reset_cause(hdev);

	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
	 * times
	 */
	if (ret &&
	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
		return ret;

	ret = hclge_reset_prepare_up(hdev);
	if (ret)
		return ret;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();
	if (ret)
		return ret;

	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		return ret;

	hdev->last_reset_time = jiffies;
	hdev->rst_stats.reset_fail_cnt = 0;
	hdev->rst_stats.reset_done_cnt++;
	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);

	hclge_update_reset_level(hdev);

	return 0;
}

static void hclge_reset(struct hclge_dev *hdev)
{
	if (hclge_reset_prepare(hdev))
		goto err_reset;

	if (hclge_reset_wait(hdev))
		goto err_reset;

	if (hclge_reset_rebuild(hdev))
		goto err_reset;

	return;

err_reset:
	if (hclge_reset_err_handle(hdev))
		hclge_reset_task_schedule(hdev);
}
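
/* A reset is driven in three phases: prepare (notify clients down and
 * assert the reset), wait (poll hardware until the reset completes) and
 * rebuild (re-init the ae device and bring clients back up). Any failure
 * falls through to hclge_reset_err_handle(), which decides whether the
 * reset task should be re-scheduled.
 */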
static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclge_dev *hdev = ae_dev->priv;

	/* We might end up getting called broadly because of 2 below cases:
	 * 1. Recoverable error was conveyed through APEI and only way to bring
	 *    normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout
	 *
	 * check if this is a new reset request and we are not here just because
	 * last reset attempt did not succeed and watchdog hit us again. We will
	 * know this if last reset request did not occur very recently (watchdog
	 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*HZ)
	 * In case of new request we reset the "reset level" to PF reset.
	 * And if it is a repeat reset request of the most recent one then we
	 * want to make sure we throttle the reset request. Therefore, we will
	 * not allow it again before 3*HZ times.
	 */

	if (time_before(jiffies, (hdev->last_reset_time +
				  HCLGE_RESET_INTERVAL))) {
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
		return;
	}

	if (hdev->default_reset_request) {
		hdev->reset_level =
			hclge_get_reset_level(ae_dev,
					      &hdev->default_reset_request);
	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
		hdev->reset_level = HNAE3_FUNC_RESET;
	}

	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
		 hdev->reset_level);

	/* request reset & schedule reset task */
	set_bit(hdev->reset_level, &hdev->reset_request);
	hclge_reset_task_schedule(hdev);

	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
		hdev->reset_level++;
}
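
/* Repeated reset events within HCLGE_RESET_INTERVAL are throttled by
 * re-arming the reset timer instead of requesting another reset. Each
 * honored event also escalates reset_level by one step (up to global
 * reset), so a PF reset that keeps failing is retried at a
 * progressively stronger level.
 */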
static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclge_reset_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);

	/* if default_reset_request has no value, it means that this reset
	 * request has already been handled, so just return here
	 */
	if (!hdev->default_reset_request)
		return;

	dev_info(&hdev->pdev->dev,
		 "triggering reset in reset timer\n");
	hclge_reset_event(hdev->pdev, NULL);
}

static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully reset, then we can proceed with driver and client
	 *       reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	hdev->last_reset_time = jiffies;
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}

static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	enum hnae3_reset_type reset_type;

	if (ae_dev->hw_err_reset_req) {
		reset_type = hclge_get_reset_level(ae_dev,
						   &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_type);
	}

	if (hdev->default_reset_request && ae_dev->ops->reset_event)
		ae_dev->ops->reset_event(hdev->pdev, NULL);

	/* enable interrupt after error handling complete */
	hclge_enable_vector(&hdev->misc_vector, true);
}

static void hclge_handle_err_recovery(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->hw_err_reset_req = 0;

	if (hclge_find_error_source(hdev)) {
		hclge_handle_error_info_log(ae_dev);
		hclge_handle_mac_tnl(hdev);
	}

	hclge_handle_err_reset_request(hdev);
}

static void hclge_misc_err_recovery(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct device *dev = &hdev->pdev->dev;
	u32 msix_sts_reg;

	msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
		if (hclge_handle_hw_msix_error
				(hdev, &hdev->default_reset_request))
			dev_info(dev, "received msix interrupt 0x%x\n",
				 msix_sts_reg);
	} else {
		hclge_handle_hw_ras_error(ae_dev);
	}

	hclge_handle_err_reset_request(hdev);
}

static void hclge_errhand_service_task(struct hclge_dev *hdev)
{
	if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
		return;

	if (hnae3_dev_ras_imp_supported(hdev))
		hclge_handle_err_recovery(hdev);
	else
		hclge_misc_err_recovery(hdev);
}

static void hclge_reset_service_task(struct hclge_dev *hdev)
{
	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		return;

	if (time_is_before_jiffies(hdev->last_rst_scheduled +
				   HCLGE_RESET_SCHED_TIMEOUT))
		dev_warn(&hdev->pdev->dev,
			 "reset service task is scheduled after %ums on cpu%u!\n",
			 jiffies_to_msecs(jiffies - hdev->last_rst_scheduled),
			 smp_processor_id());

	down(&hdev->reset_sem);
	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

static void hclge_update_vport_alive(struct hclge_dev *hdev)
{
#define HCLGE_ALIVE_SECONDS_NORMAL 8

	unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ;
	int i;

	/* start from vport 1 for PF is always alive */
	for (i = 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) ||
		    !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			continue;
		if (time_after(jiffies, vport->last_active_jiffies +
			       alive_time)) {
			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
			dev_warn(&hdev->pdev->dev,
				 "VF %u heartbeat timeout\n",
				 i - HCLGE_VF_VPORT_START_NUM);
		}
	}
}

static void hclge_periodic_service_task(struct hclge_dev *hdev)
{
	unsigned long delta = round_jiffies_relative(HZ);

	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
		return;

	/* Always handle the link updating to make sure link state is
	 * updated when it is triggered by mbx.
	 */
	hclge_update_link_status(hdev);
	hclge_sync_mac_table(hdev);
	hclge_sync_promisc_mode(hdev);
	hclge_sync_fd_table(hdev);

	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
		delta = jiffies - hdev->last_serv_processed;

		if (delta < round_jiffies_relative(HZ)) {
			delta = round_jiffies_relative(HZ) - delta;
			goto out;
		}
	}

	hdev->serv_processed_cnt++;
	hclge_update_vport_alive(hdev);

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
		hdev->last_serv_processed = jiffies;
		goto out;
	}

	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
		hclge_update_stats_for_all(hdev);

	hclge_update_port_info(hdev);
	hclge_sync_vlan_filter(hdev);

	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
		hclge_rfs_filter_expire(hdev);

	hdev->last_serv_processed = jiffies;

out:
	hclge_task_schedule(hdev, delta);
}

static void hclge_ptp_service_task(struct hclge_dev *hdev)
{
	unsigned long flags;

	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
	    !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
	    !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
		return;

	/* to prevent concurrence with the irq handler */
	spin_lock_irqsave(&hdev->ptp->lock, flags);

	/* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
	 * handler may handle it just before spin_lock_irqsave().
	 */
	if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
		hclge_ptp_clean_tx_hwts(hdev);

	spin_unlock_irqrestore(&hdev->ptp->lock, flags);
}

static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task.work);

	hclge_errhand_service_task(hdev);
	hclge_reset_service_task(hdev);
	hclge_ptp_service_task(hdev);
	hclge_mailbox_service_task(hdev);
	hclge_periodic_service_task(hdev);

	/* Handle error recovery, reset and mbx again in case periodical task
	 * delays the handling by calling hclge_task_schedule() in
	 * hclge_periodic_service_task().
	 */
	hclge_errhand_service_task(hdev);
	hclge_reset_service_task(hdev);
	hclge_mailbox_service_task(hdev);
}

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}

static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
				  struct hnae3_vector_info *vector_info)
{
#define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64

	vector_info->vector = pci_irq_vector(hdev->pdev, idx);

	/* need an extend offset to config vector >= 64 */
	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
		vector_info->io_addr = hdev->hw.hw.io_base +
				HCLGE_VECTOR_REG_BASE +
				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
	else
		vector_info->io_addr = hdev->hw.hw.io_base +
				HCLGE_VECTOR_EXT_REG_BASE +
				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
				HCLGE_VECTOR_REG_OFFSET_H +
				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
				HCLGE_VECTOR_REG_OFFSET;

	hdev->vector_status[idx] = hdev->vport[0].vport_id;
	hdev->vector_irq[idx] = vector_info->vector;
}
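
/* Vectors with idx - 1 below 64 use the base register window; higher
 * ones use the extended window. For example, with idx = 70:
 * (idx - 1) / 64 = 1 selects the extended block via
 * HCLGE_VECTOR_REG_OFFSET_H, and (idx - 1) % 64 = 5 picks the slot
 * inside that block.
 */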
static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	u16 i = 0;
	u16 j;

	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		while (++i < hdev->num_nic_msi) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				hclge_get_vector_info(hdev, i, vector);
				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclge_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector = %d\n", vector);
		return vector_id;
	}

	hclge_free_vector(hdev, vector_id);

	return 0;
}

static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg;

	hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);

	hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
				     ae_dev->dev_specs.rss_ind_tbl_size);

	return 0;
}

static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc);
	if (ret) {
		dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
		return ret;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw,
					      rss_cfg->rss_indirection_tbl);
}

static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
				       &hdev->rss_cfg, nfc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to set rss tuple, ret = %d.\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u8 tuple_sets;
	int ret;

	nfc->data = 0;

	ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type,
				       &tuple_sets);
	if (ret || !tuple_sets)
		return ret;

	nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);

	return 0;
}

static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->pf_rss_size_max;
}

static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	struct hclge_vport *vport = hdev->vport;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	struct hnae3_tc_info *tc_info;
	u16 roundup_size;
	u16 rss_size;
	int i;

	tc_info = &vport->nic.kinfo.tc_info;
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		rss_size = tc_info->tqp_count[i];

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		/* tc_size set to hardware is the log2 of roundup power of two
		 * of rss_size, the actual queue size is limited by indirection
		 * table.
		 */
		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
		    rss_size == 0) {
			dev_err(&hdev->pdev->dev,
				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
				rss_size);
			return -EINVAL;
		}

		roundup_size = roundup_pow_of_two(rss_size);
		roundup_size = ilog2(roundup_size);

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = tc_info->tqp_offset[i];
	}

	return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
					  tc_size);
}

int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl;
	u8 *key = hdev->rss_cfg.rss_hash_key;
	u8 hfunc = hdev->rss_cfg.rss_algo;
	int ret;

	ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
					     rss_indir);
	if (ret)
		return ret;

	ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, &hdev->rss_cfg);
	if (ret)
		return ret;

	return hclge_init_rss_tc_mode(hdev);
}

int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req =
		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_comm_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i = 0;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id_l = hnae3_get_field(vector_id,
					       HCLGE_VECTOR_ID_L_M,
					       HCLGE_VECTOR_ID_L_S);
	req->int_vector_id_h = hnae3_get_field(vector_id,
					       HCLGE_VECTOR_ID_H_M,
					       HCLGE_VECTOR_ID_H_S);

	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
				HCLGE_INT_TYPE_S,
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
				HCLGE_TQP_ID_S, node->tqp_index);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
				HCLGE_INT_GL_IDX_S,
				hnae3_get_field(node->int_gl_idx,
						HNAE3_RING_GL_IDX_M,
						HNAE3_RING_GL_IDX_S));
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   op,
						   false);
			req->int_vector_id_l =
				hnae3_get_field(vector_id,
						HCLGE_VECTOR_ID_L_M,
						HCLGE_VECTOR_ID_L_S);
			req->int_vector_id_h =
				hnae3_get_field(vector_id,
						HCLGE_VECTOR_ID_H_M,
						HCLGE_VECTOR_ID_H_S);
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}
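
/* The ring chain is flushed to firmware in chunks of
 * HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries per command descriptor;
 * when a chunk fills up, the descriptor is sent and re-initialized, and
 * any remainder (i > 0) is sent after the loop.
 */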
static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"failed to get vector index. vector=%d\n", vector);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}

static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
			vector_id, ret);

	return ret;
}

static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
				      bool en_uc, bool en_mc, bool en_bc)
{
	struct hclge_vport *vport = &hdev->vport[vf_id];
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	bool uc_tx_en = en_uc;
	u8 promisc_cfg = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = vf_id;

	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
		uc_tx_en = false;

	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
	req->extend_promisc = promisc_cfg;

	/* to be compatible with DEVICE_VERSION_V1/2 */
	promisc_cfg = 0;
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
	req->promisc = promisc_cfg;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set vport %u promisc mode, ret = %d.\n",
			vf_id, ret);

	return ret;
}

int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
				 bool en_mc_pmc, bool en_bc_pmc)
{
	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
}

static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				  bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool en_bc_pmc = true;

	/* For device whose version below V2, if broadcast promisc enabled,
	 * vlan filter is always bypassed. So broadcast promisc should be
	 * disabled until user enable promisc mode
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;

	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
}

static void hclge_sync_fd_state(struct hclge_dev *hdev)
{
	if (hlist_empty(&hdev->fd_rule_list))
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
}

static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
{
	if (!test_bit(location, hdev->fd_bmap)) {
		set_bit(location, hdev->fd_bmap);
		hdev->hclge_fd_rule_num++;
	}
}

static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
{
	if (test_bit(location, hdev->fd_bmap)) {
		clear_bit(location, hdev->fd_bmap);
		hdev->hclge_fd_rule_num--;
	}
}

static void hclge_fd_free_node(struct hclge_dev *hdev,
			       struct hclge_fd_rule *rule)
{
	hlist_del(&rule->rule_node);
	kfree(rule);
	hclge_sync_fd_state(hdev);
}

static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
				      struct hclge_fd_rule *old_rule,
				      struct hclge_fd_rule *new_rule,
				      enum HCLGE_FD_NODE_STATE state)
{
	switch (state) {
	case HCLGE_FD_TO_ADD:
	case HCLGE_FD_ACTIVE:
		/* 1) if the new state is TO_ADD, just replace the old rule
		 * with the same location, no matter its state, because the
		 * new rule will be configured to the hardware.
		 * 2) if the new state is ACTIVE, it means the new rule
		 * has been configured to the hardware, so just replace
		 * the old rule node with the same location.
		 * 3) neither case adds a new node to the list, so it's
		 * unnecessary to update the rule number and fd_bmap.
		 */
		new_rule->rule_node.next = old_rule->rule_node.next;
		new_rule->rule_node.pprev = old_rule->rule_node.pprev;
		memcpy(old_rule, new_rule, sizeof(*old_rule));
		kfree(new_rule);
		break;
	case HCLGE_FD_DELETED:
		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
		hclge_fd_free_node(hdev, old_rule);
		break;
	case HCLGE_FD_TO_DEL:
		/* if new request is TO_DEL, and old rule is existent
		 * 1) the state of old rule is TO_DEL, nothing needs to be
		 * done, because we delete rule by location, other rule
		 * content is unnecessary.
		 * 2) the state of old rule is ACTIVE, we need to change its
		 * state to TO_DEL, so the rule will be deleted when periodic
		 * task being scheduled.
		 * 3) the state of old rule is TO_ADD, it means the rule hasn't
		 * been added to hardware, so we just delete the rule node from
		 * fd_rule_list directly.
		 */
		if (old_rule->state == HCLGE_FD_TO_ADD) {
			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
			hclge_fd_free_node(hdev, old_rule);
			return;
		}
		old_rule->state = HCLGE_FD_TO_DEL;
		break;
	}
}
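
/* Rule node state transitions, when a node already exists at the same
 * location:
 *   TO_ADD/ACTIVE  -> replace the old node in place
 *   DELETED        -> drop the node and its fd_bmap bit
 *   TO_DEL         -> free immediately if never written to hardware
 *                     (old state TO_ADD), otherwise mark TO_DEL and let
 *                     the periodic task remove it from hardware.
 */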
static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
						u16 location,
						struct hclge_fd_rule **parent)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
		if (rule->location == location)
			return rule;
		else if (rule->location > location)
			return NULL;
		/* record the parent node, used to keep the nodes in
		 * fd_rule_list in ascending order.
		 */
		*parent = rule;
	}

	return NULL;
}

/* insert fd rule node in ascending order according to rule->location */
static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
				      struct hclge_fd_rule *rule,
				      struct hclge_fd_rule *parent)
{
	INIT_HLIST_NODE(&rule->rule_node);

	if (parent)
		hlist_add_behind(&rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&rule->rule_node, hlist);
}

static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
				     struct hclge_fd_user_def_cfg *cfg)
{
	struct hclge_fd_user_def_cfg_cmd *req;
	struct hclge_desc desc;
	u16 data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);

	req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;

	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
			HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
	req->ol2_cfg = cpu_to_le16(data);

	data = 0;
	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
			HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
	req->ol3_cfg = cpu_to_le16(data);

	data = 0;
	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
			HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
	req->ol4_cfg = cpu_to_le16(data);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set fd user def data, ret= %d\n", ret);
	return ret;
}

static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
{
	int ret;

	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
		return;

	if (!locked)
		spin_lock_bh(&hdev->fd_rule_lock);

	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
	if (ret)
		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);

	if (!locked)
		spin_unlock_bh(&hdev->fd_rule_lock);
}

static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
					  struct hclge_fd_rule *rule)
{
	struct hlist_head *hlist = &hdev->fd_rule_list;
	struct hclge_fd_rule *fd_rule, *parent = NULL;
	struct hclge_fd_user_def_info *info, *old_info;
	struct hclge_fd_user_def_cfg *cfg;

	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
		return 0;

	/* valid layer value starts from 1, so minus 1 to index the cfg */
	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
	info = &rule->ep.user_def;

	if (!cfg->ref_cnt || cfg->offset == info->offset)
		return 0;

	if (cfg->ref_cnt > 1)
		goto error;

	fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
	if (fd_rule) {
		old_info = &fd_rule->ep.user_def;
		if (info->layer == old_info->layer)
			return 0;
	}

error:
	dev_err(&hdev->pdev->dev,
		"No available offset for layer%d fd rule, each layer only support one user def offset.\n",
		info->layer);
	return -ENOSPC;
}

static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
					 struct hclge_fd_rule *rule)
{
	struct hclge_fd_user_def_cfg *cfg;

	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
		return;

	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
	if (!cfg->ref_cnt) {
		cfg->offset = rule->ep.user_def.offset;
		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
	}
	cfg->ref_cnt++;
}

static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
					 struct hclge_fd_rule *rule)
{
	struct hclge_fd_user_def_cfg *cfg;

	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
		return;

	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
	if (!cfg->ref_cnt)
		return;

	cfg->ref_cnt--;
	if (!cfg->ref_cnt) {
		cfg->offset = 0;
		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
	}
}

static void hclge_update_fd_list(struct hclge_dev *hdev,
				 enum HCLGE_FD_NODE_STATE state, u16 location,
				 struct hclge_fd_rule *new_rule)
{
	struct hlist_head *hlist = &hdev->fd_rule_list;
	struct hclge_fd_rule *fd_rule, *parent = NULL;

	fd_rule = hclge_find_fd_rule(hlist, location, &parent);
	if (fd_rule) {
		hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
		if (state == HCLGE_FD_ACTIVE)
			hclge_fd_inc_user_def_refcnt(hdev, new_rule);
		hclge_sync_fd_user_def_cfg(hdev, true);

		hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
		return;
	}

	/* it's unlikely to fail here, because we have checked the rule
	 * exists before.
	 */
	if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
		dev_warn(&hdev->pdev->dev,
			 "failed to delete fd rule %u, it's inexistent\n",
			 location);
		return;
	}

	hclge_fd_inc_user_def_refcnt(hdev, new_rule);
	hclge_sync_fd_user_def_cfg(hdev, true);

	hclge_fd_insert_rule_node(hlist, new_rule, parent);
	hclge_fd_inc_rule_cnt(hdev, new_rule->location);

	if (state == HCLGE_FD_TO_ADD) {
		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
		hclge_task_schedule(hdev, 0);
	}
}

static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
	struct hclge_get_fd_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);

	req = (struct hclge_get_fd_mode_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
		return ret;
	}

	*fd_mode = req->mode;

	return ret;
}

static int hclge_get_fd_allocation(struct hclge_dev *hdev,
				   u32 *stage1_entry_num,
				   u32 *stage2_entry_num,
				   u16 *stage1_counter_num,
				   u16 *stage2_counter_num)
{
	struct hclge_get_fd_allocation_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);

	req = (struct hclge_get_fd_allocation_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
			ret);
		return ret;
	}

	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);

	return ret;
}

static int hclge_set_fd_key_config(struct hclge_dev *hdev,
				   enum HCLGE_FD_STAGE stage_num)
{
	struct hclge_set_fd_key_config_cmd *req;
	struct hclge_fd_key_cfg *stage;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);

	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
	stage = &hdev->fd_cfg.key_cfg[stage_num];
	req->stage = stage_num;
	req->key_select = stage->key_sel;
	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);

	return ret;
}

static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
{
	struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;

	spin_lock_bh(&hdev->fd_rule_lock);
	memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
	spin_unlock_bh(&hdev->fd_rule_lock);

	hclge_fd_set_user_def_cmd(hdev, cfg);
}

static int hclge_init_fd_config(struct hclge_dev *hdev)
{
#define LOW_2_WORDS 0x03
	struct hclge_fd_key_cfg *key_cfg;
	int ret;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return 0;

	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
	if (ret)
		return ret;

	switch (hdev->fd_cfg.fd_mode) {
	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
		break;
	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Unsupported flow director mode %u\n",
			hdev->fd_cfg.fd_mode);
		return -EOPNOTSUPP;
	}

	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
	key_cfg->outer_sipv6_word_en = 0;
	key_cfg->outer_dipv6_word_en = 0;

	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* If use max 400bit key, we can support tuples for ether type */
	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
		key_cfg->tuple_active |=
				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
			key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
	}

	/* roce_type is used to filter roce frames
	 * dst_vport is used to specify the rule
	 */
	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);

	ret = hclge_get_fd_allocation(hdev,
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
	if (ret)
		return ret;

	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
}

static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
				int loc, u8 *key, bool is_add)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
	desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
	req1->index = cpu_to_le32(loc);
	req1->entry_vld = sel_x ? is_add : 0;

	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n",
			ret);

	return ret;
}

static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
			      struct hclge_fd_ad_data *action)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);

	req = (struct hclge_fd_ad_config_cmd *)desc.data;
	req->index = cpu_to_le32(loc);
	req->stage = stage;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
		      action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);
	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
			      action->override_tc);
		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
	}
	ad_data <<= 32;
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
		      action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->counter_id);

	req->ad_data = cpu_to_le64(ad_data);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);

	return ret;
}

static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
				   struct hclge_fd_rule *rule)
{
	int offset, moffset, ip_offset;
	enum HCLGE_FD_KEY_OPT key_opt;
	u16 tmp_x_s, tmp_y_s;
	u32 tmp_x_l, tmp_y_l;
	u8 *p = (u8 *)rule;
	int i;

	if (rule->unused_tuple & BIT(tuple_bit))
		return true;

	key_opt = tuple_key_info[tuple_bit].key_opt;
	offset = tuple_key_info[tuple_bit].offset;
	moffset = tuple_key_info[tuple_bit].moffset;

	switch (key_opt) {
	case KEY_OPT_U8:
		calc_x(*key_x, p[offset], p[moffset]);
		calc_y(*key_y, p[offset], p[moffset]);

		return true;
	case KEY_OPT_LE16:
		calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
		calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case KEY_OPT_LE32:
		calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
		calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case KEY_OPT_MAC:
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
			       p[moffset + i]);
			calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
			       p[moffset + i]);
		}

		return true;
	case KEY_OPT_IP:
		ip_offset = IPV4_INDEX * sizeof(u32);
		calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
		       *(u32 *)(&p[moffset + ip_offset]));
		calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
		       *(u32 *)(&p[moffset + ip_offset]));
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	default:
		return false;
	}
}
static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
				 u8 vf_id, u8 network_port_id)
{
	u32 port_number = 0;

	if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
	} else {
		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
				HCLGE_NETWORK_PORT_ID_S, network_port_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
	}

	return port_number;
}

static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
				       __le32 *key_x, __le32 *key_y,
				       struct hclge_fd_rule *rule)
{
	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	int i;

	for (i = 0; i < MAX_META_DATA; i++) {
		tuple_size = meta_data_key_info[i].key_length;
		tuple_bit = key_cfg->meta_data_active & BIT(i);

		switch (tuple_bit) {
		case BIT(ROCE_TYPE):
			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
		case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size, cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
			break;
		default:
			break;
		}
	}

	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
	shift_bits = sizeof(meta_data) * 8 - cur_pos;

	*key_x = cpu_to_le32(tmp_x << shift_bits);
	*key_y = cpu_to_le32(tmp_y << shift_bits);
}

/* A complete key is combined with meta data key and tuple key.
 * Meta data key is stored at the MSB region, and tuple key is stored at
 * the LSB region, unused bits will be filled with 0.
 */
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
			    struct hclge_fd_rule *rule)
{
	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
	u8 *cur_key_x, *cur_key_y;
	u8 meta_data_region;
	u8 tuple_size;
	int ret;
	u32 i;

	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
	cur_key_x = key_x;
	cur_key_y = key_y;

	for (i = 0; i < MAX_TUPLE; i++) {
		bool tuple_valid;

		tuple_size = tuple_key_info[i].key_length / 8;
		if (!(key_cfg->tuple_active & BIT(i)))
			continue;

		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
						     cur_key_y, rule);
		if (tuple_valid) {
			cur_key_x += tuple_size;
			cur_key_y += tuple_size;
		}
	}

	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
			MAX_META_DATA_LENGTH / 8;

	hclge_fd_convert_meta_data(key_cfg,
				   (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);

	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%u, ret=%d\n",
			rule->queue_id, ret);
		return ret;
	}

	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%u, ret=%d\n",
			rule->queue_id, ret);
	return ret;
}
static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
			       struct hclge_fd_rule *rule)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_fd_ad_data ad_data;

	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
	ad_data.ad_id = rule->location;

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		ad_data.drop_packet = true;
	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
		ad_data.override_tc = true;
		ad_data.queue_id =
			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
		ad_data.tc_size =
			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
	} else {
		ad_data.forward_to_direct_queue = true;
		ad_data.queue_id = rule->queue_id;
	}

	if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
		ad_data.use_counter = true;
		ad_data.counter_id = rule->vf_id %
				     hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
	} else {
		ad_data.use_counter = false;
		ad_data.counter_id = 0;
	}

	ad_data.use_next_stage = false;
	ad_data.next_input_key = 0;

	ad_data.write_rule_id_to_bd = true;
	ad_data.rule_id = rule->location;

	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}

static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
				       u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

	if (!spec->ip4src)
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (!spec->ip4dst)
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->psrc)
		*unused_tuple |= BIT(INNER_SRC_PORT);

	if (!spec->pdst)
		*unused_tuple |= BIT(INNER_DST_PORT);

	if (!spec->tos)
		*unused_tuple |= BIT(INNER_IP_TOS);

	return 0;
}

static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
				    u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	if (!spec->ip4src)
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (!spec->ip4dst)
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->tos)
		*unused_tuple |= BIT(INNER_IP_TOS);

	if (!spec->proto)
		*unused_tuple |= BIT(INNER_IP_PROTO);

	if (spec->l4_4_bytes)
		return -EOPNOTSUPP;

	if (spec->ip_ver != ETH_RX_NFC_IP4)
		return -EOPNOTSUPP;

	return 0;
}

static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
				       u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

	/* check whether src/dst ip address used */
	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->psrc)
		*unused_tuple |= BIT(INNER_SRC_PORT);

	if (!spec->pdst)
		*unused_tuple |= BIT(INNER_DST_PORT);

	if (!spec->tclass)
		*unused_tuple |= BIT(INNER_IP_TOS);

	return 0;
}

static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
				    u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* check whether src/dst ip address used */
	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->l4_proto)
		*unused_tuple |= BIT(INNER_IP_PROTO);

	if (!spec->tclass)
		*unused_tuple |= BIT(INNER_IP_TOS);

	if (spec->l4_4_bytes)
		return -EOPNOTSUPP;

	return 0;
}

static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);

	if (is_zero_ether_addr(spec->h_source))
		*unused_tuple |= BIT(INNER_SRC_MAC);

	if (is_zero_ether_addr(spec->h_dest))
		*unused_tuple |= BIT(INNER_DST_MAC);

	if (!spec->h_proto)
		*unused_tuple |= BIT(INNER_ETH_TYPE);

	return 0;
}

static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
				    struct ethtool_rx_flow_spec *fs,
				    u32 *unused_tuple)
{
	if (fs->flow_type & FLOW_EXT) {
		if (fs->h_ext.vlan_etype) {
			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
			return -EOPNOTSUPP;
		}

		if (!fs->h_ext.vlan_tci)
			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);

		if (fs->m_ext.vlan_tci &&
		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
			dev_err(&hdev->pdev->dev,
				"failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
			return -EINVAL;
		}
	} else {
		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		if (hdev->fd_cfg.fd_mode !=
		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
			dev_err(&hdev->pdev->dev,
				"FLOW_MAC_EXT is not supported in current fd mode!\n");
			return -EOPNOTSUPP;
		}

		if (is_zero_ether_addr(fs->h_ext.h_dest))
			*unused_tuple |= BIT(INNER_DST_MAC);
		else
			*unused_tuple &= ~BIT(INNER_DST_MAC);
	}

	return 0;
}

static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
				       struct hclge_fd_user_def_info *info)
{
	switch (flow_type) {
	case ETHER_FLOW:
		info->layer = HCLGE_FD_USER_DEF_L2;
		*unused_tuple &= ~BIT(INNER_L2_RSV);
		break;
	case IP_USER_FLOW:
	case IPV6_USER_FLOW:
		info->layer = HCLGE_FD_USER_DEF_L3;
		*unused_tuple &= ~BIT(INNER_L3_RSV);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		info->layer = HCLGE_FD_USER_DEF_L4;
		*unused_tuple &= ~BIT(INNER_L4_RSV);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
{
	return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
}

static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
					 struct ethtool_rx_flow_spec *fs,
					 u32 *unused_tuple,
					 struct hclge_fd_user_def_info *info)
{
	u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
	u16 data, offset, data_mask, offset_mask;
	int ret;

	info->layer = HCLGE_FD_USER_DEF_NONE;
	*unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;

	if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
		return 0;

	/* user-def data from ethtool is 64 bit value, the bit0~15 is used
	 * for data, and bit32~47 is used for offset.
	 */
	data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
	data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
	offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
	offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;

	if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
		return -EOPNOTSUPP;
	}

	if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
		dev_err(&hdev->pdev->dev,
			"user-def offset[%u] should be no more than %u\n",
			offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
		return -EINVAL;
	}

	if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
		dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
		return -EINVAL;
	}

	ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"unsupported flow type for user-def bytes, ret = %d\n",
			ret);
		return ret;
	}

	info->data = data;
	info->data_mask = data_mask;
	info->offset = offset;

	return 0;
}
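
/* The 64-bit ethtool "user-def" field is split as: bits 0~15 carry the
 * data to match and bits 32~47 carry the byte offset inside the layer
 * chosen by the flow type. A hypothetical invocation (device name and
 * values are illustrative only) could look like:
 *   ethtool -N eth0 flow-type tcp4 user-def 0x001000000102 action 5
 * which asks to match 0x0102 at offset 0x10 within the L4 layer.
 */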
static int hclge_fd_check_spec(struct hclge_dev *hdev,
			       struct ethtool_rx_flow_spec *fs,
			       u32 *unused_tuple,
			       struct hclge_fd_user_def_info *info)
{
	u32 flow_type;
	int ret;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
		dev_err(&hdev->pdev->dev,
			"failed to config fd rules, invalid rule location: %u, max is %u\n.",
			fs->location,
			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
		return -EINVAL;
	}

	ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
	if (ret)
		return ret;

	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
	switch (flow_type) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
						  unused_tuple);
		break;
	case IP_USER_FLOW:
		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
					       unused_tuple);
		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
						  unused_tuple);
		break;
	case IPV6_USER_FLOW:
		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
					       unused_tuple);
		break;
	case ETHER_FLOW:
		if (hdev->fd_cfg.fd_mode !=
		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
			dev_err(&hdev->pdev->dev,
				"ETHER_FLOW is not supported in current fd mode!\n");
			return -EOPNOTSUPP;
		}

		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
						 unused_tuple);
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported protocol type, protocol type = %#x\n",
			flow_type);
		return -EOPNOTSUPP;
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to check flow union tuple, ret = %d\n",
			ret);
		return ret;
	}

	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
}

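/* The hclge_fd_get_*_tuple() helpers below translate an ethtool flow spec
 * (fields in network byte order) into a struct hclge_fd_rule in host byte
 * order, which is the form the TCAM key building code consumes.
 */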
static void hclge_fd_get_tcpip4_tuple(struct ethtool_rx_flow_spec *fs,
				      struct hclge_fd_rule *rule, u8 ip_proto)
{
	rule->tuples.src_ip[IPV4_INDEX] =
			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
	rule->tuples_mask.src_ip[IPV4_INDEX] =
			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);

	rule->tuples.dst_ip[IPV4_INDEX] =
			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
	rule->tuples_mask.dst_ip[IPV4_INDEX] =
			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);

	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);

	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);

	rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;

	rule->tuples.ether_proto = ETH_P_IP;
	rule->tuples_mask.ether_proto = 0xFFFF;

	rule->tuples.ip_proto = ip_proto;
	rule->tuples_mask.ip_proto = 0xFF;
}

static void hclge_fd_get_ip4_tuple(struct ethtool_rx_flow_spec *fs,
				   struct hclge_fd_rule *rule)
{
	rule->tuples.src_ip[IPV4_INDEX] =
			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
	rule->tuples_mask.src_ip[IPV4_INDEX] =
			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);

	rule->tuples.dst_ip[IPV4_INDEX] =
			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
	rule->tuples_mask.dst_ip[IPV4_INDEX] =
			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);

	rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
	rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;

	rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
	rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;

	rule->tuples.ether_proto = ETH_P_IP;
	rule->tuples_mask.ether_proto = 0xFFFF;
}

static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs,
				      struct hclge_fd_rule *rule, u8 ip_proto)
{
	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
			  IPV6_SIZE);
	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
			  IPV6_SIZE);

	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
			  IPV6_SIZE);
	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
			  IPV6_SIZE);

	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);

	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);

	rule->tuples.ether_proto = ETH_P_IPV6;
	rule->tuples_mask.ether_proto = 0xFFFF;

	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;

	rule->tuples.ip_proto = ip_proto;
	rule->tuples_mask.ip_proto = 0xFF;
}

static void hclge_fd_get_ip6_tuple(struct ethtool_rx_flow_spec *fs,
				   struct hclge_fd_rule *rule)
{
	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
			  IPV6_SIZE);
	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
			  IPV6_SIZE);

	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
			  IPV6_SIZE);
	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
			  IPV6_SIZE);

	rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
	rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;

	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;

	rule->tuples.ether_proto = ETH_P_IPV6;
	rule->tuples_mask.ether_proto = 0xFFFF;
}

static void hclge_fd_get_ether_tuple(struct ethtool_rx_flow_spec *fs,
				     struct hclge_fd_rule *rule)
{
	ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
	ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);

	ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
	ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);

	rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
	rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
}

static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
					struct hclge_fd_rule *rule)
{
	switch (info->layer) {
	case HCLGE_FD_USER_DEF_L2:
		rule->tuples.l2_user_def = info->data;
		rule->tuples_mask.l2_user_def = info->data_mask;
		break;
	case HCLGE_FD_USER_DEF_L3:
		rule->tuples.l3_user_def = info->data;
		rule->tuples_mask.l3_user_def = info->data_mask;
		break;
	case HCLGE_FD_USER_DEF_L4:
		/* the L4 user-def word lives in the high 16 bits of the key */
		rule->tuples.l4_user_def = (u32)info->data << 16;
		rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
		break;
	default:
		break;
	}

	rule->ep.user_def = *info;
}

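/* Dispatch on the base flow type to fill the rule tuples, then fold in
 * the optional FLOW_EXT (vlan tag + user-def bytes) and FLOW_MAC_EXT
 * (destination mac) extensions.
 */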
static int hclge_fd_get_tuple(struct ethtool_rx_flow_spec *fs,
			      struct hclge_fd_rule *rule,
			      struct hclge_fd_user_def_info *info)
{
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);

	switch (flow_type) {
	case SCTP_V4_FLOW:
		hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_SCTP);
		break;
	case TCP_V4_FLOW:
		hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_TCP);
		break;
	case UDP_V4_FLOW:
		hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_UDP);
		break;
	case IP_USER_FLOW:
		hclge_fd_get_ip4_tuple(fs, rule);
		break;
	case SCTP_V6_FLOW:
		hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_SCTP);
		break;
	case TCP_V6_FLOW:
		hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_TCP);
		break;
	case UDP_V6_FLOW:
		hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_UDP);
		break;
	case IPV6_USER_FLOW:
		hclge_fd_get_ip6_tuple(fs, rule);
		break;
	case ETHER_FLOW:
		hclge_fd_get_ether_tuple(fs, rule);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (fs->flow_type & FLOW_EXT) {
		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
		hclge_fd_get_user_def_tuple(info, rule);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
	}

	return 0;
}

static int hclge_fd_config_rule(struct hclge_dev *hdev,
				struct hclge_fd_rule *rule)
{
	int ret;

	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		return ret;

	return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
}

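/* Common insert path shared by the ethtool and tc flower interfaces.
 * Only one rule type (ethtool, tc flower or aRFS) may own the flow
 * table at a time, so a type conflict is rejected here and any aRFS
 * rules are flushed before the new rule is programmed.
 */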
static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
				     struct hclge_fd_rule *rule)
{
	int ret;

	spin_lock_bh(&hdev->fd_rule_lock);

	if (hdev->fd_active_type != rule->rule_type &&
	    (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
	     hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
		dev_err(&hdev->pdev->dev,
			"mode conflict(new type %d, active type %d), please delete existent rules first\n",
			rule->rule_type, hdev->fd_active_type);
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EINVAL;
	}

	ret = hclge_fd_check_user_def_refcnt(hdev, rule);
	if (ret)
		goto out;

	ret = hclge_clear_arfs_rules(hdev);
	if (ret)
		goto out;

	ret = hclge_fd_config_rule(hdev, rule);
	if (ret)
		goto out;

	rule->state = HCLGE_FD_ACTIVE;
	hdev->fd_active_type = rule->rule_type;
	hclge_update_fd_list(hdev, rule->state, rule->location, rule);

out:
	spin_unlock_bh(&hdev->fd_rule_lock);
	return ret;
}

static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
}

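/* An ethtool ring_cookie either requests a drop (RX_CLS_FLOW_DISC) or
 * encodes a queue id in the low bits and a 1-based vf id above
 * ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; decode it into a vport id, an fd
 * action and a queue id.
 */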
static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
				      u16 *vport_id, u8 *action, u16 *queue_id)
{
	struct hclge_vport *vport = hdev->vport;

	if (ring_cookie == RX_CLS_FLOW_DISC) {
		*action = HCLGE_FD_ACTION_DROP_PACKET;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		u16 tqps;

		/* To keep consistent with user's configuration, minus 1 when
		 * printing 'vf', because vf id from ethtool is added 1 for vf.
		 */
		if (vf > hdev->num_req_vfs) {
			dev_err(&hdev->pdev->dev,
				"Error: vf id (%u) should be less than %u\n",
				vf - 1U, hdev->num_req_vfs);
			return -EINVAL;
		}

		*vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
		tqps = hdev->vport[vf].nic.kinfo.num_tqps;

		if (ring >= tqps) {
			dev_err(&hdev->pdev->dev,
				"Error: queue id (%u) > max tqp num (%u)\n",
				ring, tqps - 1U);
			return -EINVAL;
		}

		*action = HCLGE_FD_ACTION_SELECT_QUEUE;
		*queue_id = ring;
	}

	return 0;
}

static int hclge_add_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_user_def_info info;
	u16 dst_vport_id = 0, q_index = 0;
	struct ethtool_rx_flow_spec *fs;
	struct hclge_fd_rule *rule;
	u32 unused = 0;
	u8 action;
	int ret;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
		dev_err(&hdev->pdev->dev,
			"flow table director is not supported\n");
		return -EOPNOTSUPP;
	}

	if (!hdev->fd_en) {
		dev_err(&hdev->pdev->dev,
			"please enable flow director first\n");
		return -EOPNOTSUPP;
	}

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
	if (ret)
		return ret;

	ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
					 &action, &q_index);
	if (ret)
		return ret;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_fd_get_tuple(fs, rule, &info);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->flow_type = fs->flow_type;
	rule->location = fs->location;
	rule->unused_tuple = unused;
	rule->vf_id = dst_vport_id;
	rule->queue_id = q_index;
	rule->action = action;
	rule->rule_type = HCLGE_FD_EP_ACTIVE;

	ret = hclge_add_fd_entry_common(hdev, rule);
	if (ret)
		kfree(rule);

	return ret;
}

static int hclge_del_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	int ret;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
	    !test_bit(fs->location, hdev->fd_bmap)) {
		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %u is inexistent\n", fs->location);
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -ENOENT;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
				   NULL, false);
	if (ret)
		goto out;

	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);

out:
	spin_unlock_bh(&hdev->fd_rule_lock);
	return ret;
}

static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
					 bool clear_list)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	u16 location;

	spin_lock_bh(&hdev->fd_rule_lock);

	for_each_set_bit(location, hdev->fd_bmap,
			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
				     NULL, false);

	if (clear_list) {
		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
					  rule_node) {
			hlist_del(&rule->rule_node);
			kfree(rule);
		}
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
		hdev->hclge_fd_rule_num = 0;
		bitmap_zero(hdev->fd_bmap,
			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
	}

	spin_unlock_bh(&hdev->fd_rule_lock);
}

static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
{
	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return;

	hclge_clear_fd_rules_in_list(hdev, true);
	hclge_fd_disable_user_def(hdev);
}

static int hclge_restore_fd_entries(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	/* Return ok here, because reset error handling will check this
	 * return value. If error is returned here, the reset process will
	 * fail.
	 */
	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return 0;

	/* if fd is disabled, should not restore it when reset */
	if (!hdev->fd_en)
		return 0;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rule->state == HCLGE_FD_ACTIVE)
			rule->state = HCLGE_FD_TO_ADD;
	}
	spin_unlock_bh(&hdev->fd_rule_lock);
	set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);

	return 0;
}

static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) ||
	    hclge_is_cls_flower_active(handle))
		return -EOPNOTSUPP;

	cmd->rule_cnt = hdev->hclge_fd_rule_num;
	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	return 0;
}

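/* The hclge_fd_get_*_info() helpers below do the reverse translation of
 * the tuple getters above: they fill an ethtool spec from a stored rule,
 * reporting an all-zero mask for every tuple recorded as unused.
 */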
static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
				     struct ethtool_tcpip4_spec *spec,
				     struct ethtool_tcpip4_spec *spec_mask)
{
	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

	spec->psrc = cpu_to_be16(rule->tuples.src_port);
	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.src_port);

	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.dst_port);

	spec->tos = rule->tuples.ip_tos;
	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
			0 : rule->tuples_mask.ip_tos;
}

static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
				  struct ethtool_usrip4_spec *spec,
				  struct ethtool_usrip4_spec *spec_mask)
{
	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

	spec->tos = rule->tuples.ip_tos;
	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
			0 : rule->tuples_mask.ip_tos;

	spec->proto = rule->tuples.ip_proto;
	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
			0 : rule->tuples_mask.ip_proto;

	spec->ip_ver = ETH_RX_NFC_IP4;
}

static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
				     struct ethtool_tcpip6_spec *spec,
				     struct ethtool_tcpip6_spec *spec_mask)
{
	cpu_to_be32_array(spec->ip6src,
			  rule->tuples.src_ip, IPV6_SIZE);
	cpu_to_be32_array(spec->ip6dst,
			  rule->tuples.dst_ip, IPV6_SIZE);
	if (rule->unused_tuple & BIT(INNER_SRC_IP))
		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
	else
		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
				  IPV6_SIZE);

	if (rule->unused_tuple & BIT(INNER_DST_IP))
		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
	else
		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
				  IPV6_SIZE);

	spec->tclass = rule->tuples.ip_tos;
	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
			0 : rule->tuples_mask.ip_tos;

	spec->psrc = cpu_to_be16(rule->tuples.src_port);
	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.src_port);

	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.dst_port);
}

static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
				  struct ethtool_usrip6_spec *spec,
				  struct ethtool_usrip6_spec *spec_mask)
{
	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
	if (rule->unused_tuple & BIT(INNER_SRC_IP))
		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
	else
		cpu_to_be32_array(spec_mask->ip6src,
				  rule->tuples_mask.src_ip, IPV6_SIZE);

	if (rule->unused_tuple & BIT(INNER_DST_IP))
		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
	else
		cpu_to_be32_array(spec_mask->ip6dst,
				  rule->tuples_mask.dst_ip, IPV6_SIZE);

	spec->tclass = rule->tuples.ip_tos;
	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
			0 : rule->tuples_mask.ip_tos;

	spec->l4_proto = rule->tuples.ip_proto;
	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
			0 : rule->tuples_mask.ip_proto;
}

static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
				    struct ethhdr *spec,
				    struct ethhdr *spec_mask)
{
	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);

	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
		eth_zero_addr(spec_mask->h_source);
	else
		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);

	if (rule->unused_tuple & BIT(INNER_DST_MAC))
		eth_zero_addr(spec_mask->h_dest);
	else
		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);

	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
}

static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
				       struct hclge_fd_rule *rule)
{
	if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
	    HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
		fs->h_ext.data[0] = 0;
		fs->h_ext.data[1] = 0;
		fs->m_ext.data[0] = 0;
		fs->m_ext.data[1] = 0;
	} else {
		fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
		fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
		fs->m_ext.data[0] =
				cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
		fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
	}
}

static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
				  struct hclge_fd_rule *rule)
{
	if (fs->flow_type & FLOW_EXT) {
		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
		fs->m_ext.vlan_tci =
			rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
			0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);

		hclge_fd_get_user_def_info(fs, rule);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_dest);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_dest,
					rule->tuples_mask.dst_mac);
	}
}

static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev,
					       u16 location)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node2;

	/* the rule list is kept sorted by location */
	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location == location)
			return rule;
		else if (rule->location > location)
			return NULL;
	}

	return NULL;
}

static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs,
				     struct hclge_fd_rule *rule)
{
	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else {
		u64 vf_id;

		fs->ring_cookie = rule->queue_id;
		vf_id = rule->vf_id;
		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		fs->ring_cookie |= vf_id;
	}
}

static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
				  struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule *rule = NULL;
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	spin_lock_bh(&hdev->fd_rule_lock);

	rule = hclge_get_fd_rule(hdev, fs->location);
	if (!rule) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -ENOENT;
	}

	fs->flow_type = rule->flow_type;
	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
					 &fs->m_u.tcp_ip4_spec);
		break;
	case IP_USER_FLOW:
		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
				      &fs->m_u.usr_ip4_spec);
		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
					 &fs->m_u.tcp_ip6_spec);
		break;
	case IPV6_USER_FLOW:
		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
				      &fs->m_u.usr_ip6_spec);
		break;
	/* The flow type of fd rule has been checked before adding in to rule
	 * list. As other flow types have been handled, it must be ETHER_FLOW
	 * for the default case
	 */
	default:
		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
					&fs->m_u.ether_spec);
		break;
	}

	hclge_fd_get_ext_info(fs, rule);

	hclge_fd_get_ring_cookie(fs, rule);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}

static int hclge_get_all_rules(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node2;
	int cnt = 0;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return -EOPNOTSUPP;

	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (cnt == cmd->rule_cnt) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -EMSGSIZE;
		}

		if (rule->state == HCLGE_FD_TO_DEL)
			continue;

		rule_locs[cnt] = rule->location;
		cnt++;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	cmd->rule_cnt = cnt;

	return 0;
}

static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
				     struct hclge_fd_rule_tuples *tuples)
{
#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32

	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
	tuples->ip_proto = fkeys->basic.ip_proto;
	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);

	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
	} else {
		int i;

		for (i = 0; i < IPV6_SIZE; i++) {
			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
		}
	}
}

/* traverse all rules, check whether an existed rule has the same tuples */
static struct hclge_fd_rule *
hclge_fd_search_flow_keys(struct hclge_dev *hdev,
			  const struct hclge_fd_rule_tuples *tuples)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
			return rule;
	}

	return NULL;
}

static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
				     struct hclge_fd_rule *rule)
{
	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
			     BIT(INNER_SRC_PORT);
	rule->action = 0;
	rule->vf_id = 0;
	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
	rule->state = HCLGE_FD_TO_ADD;
	if (tuples->ether_proto == ETH_P_IP) {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V4_FLOW;
		else
			rule->flow_type = UDP_V4_FLOW;
	} else {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V6_FLOW;
		else
			rule->flow_type = UDP_V6_FLOW;
	}
	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
}

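/* aRFS entry point, reached when the stack asks the driver to steer a
 * received flow to the CPU's queue. Only the dissected 5-tuple fields
 * are matched; all other tuples are masked out by
 * hclge_fd_build_arfs_rule() above.
 */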
static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
				      u16 flow_id, struct flow_keys *fkeys)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule_tuples new_tuples = {};
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	u16 bit_id;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return -EOPNOTSUPP;

	/* when there is already fd rule existed add by user,
	 * arfs should not work
	 */
	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EOPNOTSUPP;
	}

	hclge_fd_get_flow_tuples(fkeys, &new_tuples);

	/* check is there flow director filter existed for this flow,
	 * if not, create a new filter for it;
	 * if filter exist with different queue id, modify the filter;
	 * if filter exist with same queue id, do nothing
	 */
	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
	if (!rule) {
		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -ENOSPC;
		}

		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
		if (!rule) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -ENOMEM;
		}

		rule->location = bit_id;
		rule->arfs.flow_id = flow_id;
		rule->queue_id = queue_id;
		hclge_fd_build_arfs_rule(&new_tuples, rule);
		hclge_update_fd_list(hdev, rule->state, rule->location, rule);
		hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
	} else if (rule->queue_id != queue_id) {
		rule->queue_id = queue_id;
		rule->state = HCLGE_FD_TO_ADD;
		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
		hclge_task_schedule(hdev, 0);
	}
	spin_unlock_bh(&hdev->fd_rule_lock);
	return rule->location;
}

static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return;
	}
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rule->state != HCLGE_FD_ACTIVE)
			continue;
		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
					rule->arfs.flow_id, rule->location)) {
			rule->state = HCLGE_FD_TO_DEL;
			set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
		}
	}
	spin_unlock_bh(&hdev->fd_rule_lock);
#endif
}

/* make sure being called after lock up with fd_rule_lock */
static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int ret;

	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
		return 0;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		switch (rule->state) {
		case HCLGE_FD_TO_DEL:
		case HCLGE_FD_ACTIVE:
			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
						   rule->location, NULL, false);
			if (ret)
				return ret;
			fallthrough;
		case HCLGE_FD_TO_ADD:
			hclge_fd_dec_rule_cnt(hdev, rule->location);
			hlist_del(&rule->rule_node);
			kfree(rule);
			break;
		default:
			break;
		}
	}
	hclge_sync_fd_state(hdev);

#endif
	return 0;
}

static void hclge_get_cls_key_basic(const struct flow_rule *flow,
				    struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;
		u16 ethtype_key, ethtype_mask;

		flow_rule_match_basic(flow, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);

		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}
		rule->tuples.ether_proto = ethtype_key;
		rule->tuples_mask.ether_proto = ethtype_mask;
		rule->tuples.ip_proto = match.key->ip_proto;
		rule->tuples_mask.ip_proto = match.mask->ip_proto;
	} else {
		rule->unused_tuple |= BIT(INNER_IP_PROTO);
		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
	}
}

static void hclge_get_cls_key_mac(const struct flow_rule *flow,
				  struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(flow, &match);
		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
		ether_addr_copy(rule->tuples.src_mac, match.key->src);
		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
	} else {
		rule->unused_tuple |= BIT(INNER_DST_MAC);
		rule->unused_tuple |= BIT(INNER_SRC_MAC);
	}
}

static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
				   struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(flow, &match);
		rule->tuples.vlan_tag1 = match.key->vlan_id |
				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
	} else {
		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
	}
}

static void hclge_get_cls_key_ip(const struct flow_rule *flow,
				 struct hclge_fd_rule *rule)
{
	u16 addr_type = 0;

	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(flow, &match);
		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(flow, &match);
		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
						be32_to_cpu(match.mask->src);
		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
						be32_to_cpu(match.mask->dst);
	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(flow, &match);
		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
				  IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  match.mask->src.s6_addr32, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
				  IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  match.mask->dst.s6_addr32, IPV6_SIZE);
	} else {
		rule->unused_tuple |= BIT(INNER_SRC_IP);
		rule->unused_tuple |= BIT(INNER_DST_IP);
	}
}

static void hclge_get_cls_key_port(const struct flow_rule *flow,
				   struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(flow, &match);

		rule->tuples.src_port = be16_to_cpu(match.key->src);
		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
	} else {
		rule->unused_tuple |= BIT(INNER_SRC_PORT);
		rule->unused_tuple |= BIT(INNER_DST_PORT);
	}
}

static int hclge_parse_cls_flower(struct hclge_dev *hdev,
				  struct flow_cls_offload *cls_flower,
				  struct hclge_fd_rule *rule)
{
	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
	struct flow_dissector *dissector = flow->match.dissector;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) {
		dev_err(&hdev->pdev->dev, "unsupported key set: %#llx\n",
			dissector->used_keys);
		return -EOPNOTSUPP;
	}

	hclge_get_cls_key_basic(flow, rule);
	hclge_get_cls_key_mac(flow, rule);
	hclge_get_cls_key_vlan(flow, rule);
	hclge_get_cls_key_ip(flow, rule);
	hclge_get_cls_key_port(flow, rule);

	return 0;
}

static int hclge_check_cls_flower(struct hclge_dev *hdev,
				  struct flow_cls_offload *cls_flower, int tc)
{
	u32 prio = cls_flower->common.prio;

	if (tc < 0 || tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
		return -EINVAL;
	}

	if (prio == 0 ||
	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
		dev_err(&hdev->pdev->dev,
			"prio %u should be in range[1, %u]\n",
			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
		return -EINVAL;
	}

	if (test_bit(prio - 1, hdev->fd_bmap)) {
		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
		return -EINVAL;
	}

	return 0;
}

static int hclge_add_cls_flower(struct hnae3_handle *handle,
				struct flow_cls_offload *cls_flower,
				int tc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	int ret;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
		dev_err(&hdev->pdev->dev,
			"cls flower is not supported\n");
		return -EOPNOTSUPP;
	}

	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to check cls flower params, ret = %d\n", ret);
		return ret;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->action = HCLGE_FD_ACTION_SELECT_TC;
	rule->cls_flower.tc = tc;
	rule->location = cls_flower->common.prio - 1;
	rule->vf_id = 0;
	rule->cls_flower.cookie = cls_flower->cookie;
	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;

	ret = hclge_add_fd_entry_common(hdev, rule);
	if (ret)
		kfree(rule);

	return ret;
}

static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
						   unsigned long cookie)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rule->cls_flower.cookie == cookie)
			return rule;
	}

	return NULL;
}

static int hclge_del_cls_flower(struct hnae3_handle *handle,
				struct flow_cls_offload *cls_flower)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	int ret;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return -EOPNOTSUPP;

	spin_lock_bh(&hdev->fd_rule_lock);

	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
	if (!rule) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EINVAL;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
				   NULL, false);
	if (ret) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return ret;
	}

	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}

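/* Rules left in HCLGE_FD_TO_ADD or HCLGE_FD_TO_DEL state are applied to
 * hardware asynchronously: the periodic service task ends up in
 * hclge_sync_fd_table() whenever HCLGE_STATE_FD_TBL_CHANGED is set, and
 * the flag is re-armed if any hardware write fails so the work is
 * retried.
 */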
static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int ret = 0;

	if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
		return;

	spin_lock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
		switch (rule->state) {
		case HCLGE_FD_TO_ADD:
			ret = hclge_fd_config_rule(hdev, rule);
			if (ret)
				goto out;
			rule->state = HCLGE_FD_ACTIVE;
			break;
		case HCLGE_FD_TO_DEL:
			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
						   rule->location, NULL, false);
			if (ret)
				goto out;
			hclge_fd_dec_rule_cnt(hdev, rule->location);
			hclge_fd_free_node(hdev, rule);
			break;
		default:
			break;
		}
	}

out:
	if (ret)
		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);

	spin_unlock_bh(&hdev->fd_rule_lock);
}

static void hclge_sync_fd_table(struct hclge_dev *hdev)
{
	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return;

	if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
		bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;

		hclge_clear_fd_rules_in_list(hdev, clear_list);
	}

	hclge_sync_fd_user_def_cfg(hdev, false);

	hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
}

static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}

static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
}

static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rst_stats.hw_reset_done_cnt;
}

static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hdev->fd_en = enable;

	if (!enable)
		set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
	else
		hclge_restore_fd_entries(handle);

	hclge_task_schedule(hdev, 0);
}

static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
#define HCLGE_LINK_STATUS_WAIT_CNT  3

	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);

	if (enable) {
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
	}

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
		return;
	}

	if (!enable)
		hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN,
					   HCLGE_LINK_STATUS_WAIT_CNT);
}

static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
				     u8 switch_param, u8 param_mask)
{
	struct hclge_mac_vlan_switch_cmd *req;
	struct hclge_desc desc;
	u32 func_id;
	int ret;

	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;

	/* read current config parameter */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
				   true);
	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
	req->func_id = cpu_to_le32(func_id);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"read mac vlan switch parameter fail, ret = %d\n", ret);
		return ret;
	}

	/* modify and write new config parameter */
	hclge_comm_cmd_reuse_desc(&desc, false);
	req->switch_param = (req->switch_param & param_mask) | switch_param;
	req->param_mask = param_mask;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"set mac vlan switch parameter fail, ret = %d\n", ret);
	return ret;
}

static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
				       int link_ret)
{
#define HCLGE_PHY_LINK_STATUS_NUM  200

	struct phy_device *phydev = hdev->hw.mac.phydev;
	int i = 0;
	int ret;

	do {
		ret = phy_read_status(phydev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"phy update link status fail, ret = %d\n", ret);
			return;
		}

		if (phydev->link == link_ret)
			break;

		msleep(HCLGE_LINK_STATUS_MS);
	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
}

static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
				      int wait_cnt)
{
	int link_status;
	int i = 0;
	int ret;

	do {
		ret = hclge_get_mac_link_status(hdev, &link_status);
		if (ret)
			return ret;
		if (link_status == link_ret)
			return 0;

		msleep(HCLGE_LINK_STATUS_MS);
	} while (++i < wait_cnt);
	return -EBUSY;
}

static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
					  bool is_phy)
{
#define HCLGE_MAC_LINK_STATUS_NUM  100

	int link_ret;

	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;

	if (is_phy)
		hclge_phy_link_status_wait(hdev, link_ret);

	return hclge_mac_link_status_wait(hdev, link_ret,
					  HCLGE_MAC_LINK_STATUS_NUM);
}

static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config at first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret =%d.\n", ret);
		return ret;
	}

	/* 2 Then setup the loopback flag */
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	/* 3 Config mac work mode with loopback flag
	 * and its original configure parameters
	 */
	hclge_comm_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret =%d.\n", ret);
	return ret;
}

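/* Common (serdes / imp-phy) loopback is configured in two steps: send
 * the config command, then poll the same descriptor until the firmware
 * sets the DONE bit and reports SUCCESS, see
 * hclge_cfg_common_loopback_wait() below.
 */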
static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en,
					      enum hnae3_loop loop_mode)
{
	struct hclge_common_lb_cmd *req;
	struct hclge_desc desc;
	u8 loop_mode_b;
	int ret;

	req = (struct hclge_common_lb_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);

	switch (loop_mode) {
	case HNAE3_LOOP_SERIAL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PARALLEL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PHY:
		loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported loopback mode %d\n", loop_mode);
		return -ENOTSUPP;
	}

	req->mask = loop_mode_b;
	if (en)
		req->enable = loop_mode_b;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to send loopback cmd, loop_mode = %d, ret = %d\n",
			loop_mode, ret);

	return ret;
}

static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev)
{
#define HCLGE_COMMON_LB_RETRY_MS	10
#define HCLGE_COMMON_LB_RETRY_NUM	100

	struct hclge_common_lb_cmd *req;
	struct hclge_desc desc;
	u32 i = 0;
	int ret;

	req = (struct hclge_common_lb_cmd *)desc.data;

	do {
		msleep(HCLGE_COMMON_LB_RETRY_MS);
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to get loopback done status, ret = %d\n",
				ret);
			return ret;
		}
	} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
		 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));

	if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
		dev_err(&hdev->pdev->dev, "wait loopback timeout\n");
		return -EBUSY;
	} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
		dev_err(&hdev->pdev->dev, "failed to do loopback test\n");
		return -EIO;
	}

	return 0;
}

static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
	int ret;

	ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode);
	if (ret)
		return ret;

	return hclge_cfg_common_loopback_wait(hdev);
}

static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
	int ret;

	ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
	if (ret)
		return ret;

	hclge_cfg_mac_mode(hdev, en);

	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"serdes loopback config mac mode timeout\n");

	return ret;
}

static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
				     struct phy_device *phydev)
{
	int ret;

	if (!phydev->suspended) {
		ret = phy_suspend(phydev);
		if (ret)
			return ret;
	}

	ret = phy_resume(phydev);
	if (ret)
		return ret;

	return phy_loopback(phydev, true);
}

static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
				      struct phy_device *phydev)
{
	int ret;

	ret = phy_loopback(phydev, false);
	if (ret)
		return ret;

	return phy_suspend(phydev);
}

static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int ret;

	if (!phydev) {
		if (hnae3_dev_phy_imp_supported(hdev))
			return hclge_set_common_loopback(hdev, en,
							 HNAE3_LOOP_PHY);
		return -ENOTSUPP;
	}

	if (en)
		ret = hclge_enable_phy_loopback(hdev, phydev);
	else
		ret = hclge_disable_phy_loopback(hdev, phydev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"set phy loopback fail, ret = %d\n", ret);
		return ret;
	}

	hclge_cfg_mac_mode(hdev, en);

	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"phy loopback config mac mode timeout\n");

	return ret;
}

static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
				     u16 stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGE_TQP_ENABLE_B;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;
	u16 i;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
		if (ret)
			return ret;
	}
	return 0;
}

static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret = 0;

	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
	 * the same, the packets are looped back in the SSU. If SSU loopback
	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
	 */
	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);

		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
						HCLGE_SWITCH_ALW_LPBK_MASK);
		if (ret)
			return ret;
	}

	switch (loop_mode) {
	case HNAE3_LOOP_APP:
		ret = hclge_set_app_loopback(hdev, en);
		break;
	case HNAE3_LOOP_SERIAL_SERDES:
	case HNAE3_LOOP_PARALLEL_SERDES:
		ret = hclge_set_common_loopback(hdev, en, loop_mode);
		break;
	case HNAE3_LOOP_PHY:
		ret = hclge_set_phy_loopback(hdev, en);
		break;
	case HNAE3_LOOP_EXTERNAL:
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	if (ret)
		return ret;

	ret = hclge_tqp_enable(handle, en);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
			en ? "enable" : "disable", ret);

	return ret;
}

static int hclge_set_default_loopback(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_set_app_loopback(hdev, false);
	if (ret)
		return ret;

	ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
	if (ret)
		return ret;

	return hclge_cfg_common_loopback(hdev, false,
					 HNAE3_LOOP_PARALLEL_SERDES);
}

static void hclge_flush_link_update(struct hclge_dev *hdev)
{
#define HCLGE_FLUSH_LINK_TIMEOUT	100000

	unsigned long last = hdev->serv_processed_cnt;
	int i = 0;

	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
	       last == hdev->serv_processed_cnt)
		mdelay(1);
}

static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (enable) {
		hclge_task_schedule(hdev, 0);
	} else {
		/* Set the DOWN flag here to disable link updating */
		set_bit(HCLGE_STATE_DOWN, &hdev->state);

		/* flush memory to make sure DOWN is seen by service task */
		smp_mb__before_atomic();
		hclge_flush_link_update(hdev);
	}
}

static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_comm_reset_tqp_stats(handle);

	hclge_mac_start_phy(hdev);

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	spin_lock_bh(&hdev->fd_rule_lock);
	hclge_clear_arfs_rules(hdev);
	spin_unlock_bh(&hdev->fd_rule_lock);

	/* If it is not PF reset or FLR, the firmware will disable the MAC,
	 * so it only need to stop phy here.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
		hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
				       HCLGE_PFC_DISABLE);
		if (hdev->reset_type != HNAE3_FUNC_RESET &&
		    hdev->reset_type != HNAE3_FLR_RESET) {
			hclge_mac_stop_phy(hdev);
			hclge_update_link_status(hdev);
			return;
		}
	}

	hclge_reset_tqp(handle);

	hclge_config_mac_tnl_int(hdev, false);

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_comm_reset_tqp_stats(handle);
	hclge_update_link_status(hdev);
}

int hclge_vport_start(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	set_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
	vport->last_active_jiffies = jiffies;
	vport->need_notify = 0;

	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
		if (vport->vport_id) {
			hclge_restore_mac_table_common(vport);
			hclge_restore_vport_vlan_table(vport);
		} else {
			hclge_restore_hw_table(hdev);
		}
	}

	clear_bit(vport->vport_id, hdev->vport_config_block);

	return 0;
}

void hclge_vport_stop(struct hclge_vport *vport)
{
	clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->need_notify = 0;
}

static int hclge_client_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_vport_start(vport);
}

static void hclge_client_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	hclge_vport_stop(vport);
}

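/* Decode the response of a MAC/VLAN table command into an errno. Besides
 * the generic cmdq status, the firmware reports a per-opcode resp_code,
 * e.g. entry miss on remove/lookup or table overflow on add.
 */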
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if (!resp_code || resp_code == 1)
			return 0;
		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
			 resp_code == HCLGE_ADD_MC_OVERFLOW)
			return -ENOSPC;

		dev_err(&hdev->pdev->dev,
			"add mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return 0;
		} else if (resp_code == 1) {
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
			return -ENOENT;
		}

		dev_err(&hdev->pdev->dev,
			"remove mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return 0;
		} else if (resp_code == 1) {
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
			return -ENOENT;
		}

		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	}

	dev_err(&hdev->pdev->dev,
		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);

	return -EINVAL;
}

static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
#define HCLGE_VF_NUM_IN_FIRST_DESC 192

	unsigned int word_num;
	unsigned int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}

static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6

	int i, j;

	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}

static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);

	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}

static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}

static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		/* multicast lookup spans three descriptors */
		desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}

static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
                                  struct hclge_mac_vlan_tbl_entry_cmd *req,
                                  struct hclge_desc *mc_desc)
{
        struct hclge_dev *hdev = vport->back;
        int cfg_status;
        u8 resp_code;
        u16 retval;
        int ret;

        if (!mc_desc) {
                struct hclge_desc desc;

                hclge_cmd_setup_basic_desc(&desc,
                                           HCLGE_OPC_MAC_VLAN_ADD,
                                           false);
                memcpy(desc.data, req,
                       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
                resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
                retval = le16_to_cpu(desc.retval);

                cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
                                                           resp_code,
                                                           HCLGE_MAC_VLAN_ADD);
        } else {
                hclge_comm_cmd_reuse_desc(&mc_desc[0], false);
                mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
                hclge_comm_cmd_reuse_desc(&mc_desc[1], false);
                mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
                hclge_comm_cmd_reuse_desc(&mc_desc[2], false);
                mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT);
                memcpy(mc_desc[0].data, req,
                       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
                ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
                resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
                retval = le16_to_cpu(mc_desc[0].retval);

                cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
                                                           resp_code,
                                                           HCLGE_MAC_VLAN_ADD);
        }

        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "add mac addr failed for cmd_send, ret =%d.\n",
                        ret);
                return ret;
        }

        return cfg_status;
}
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
                               u16 *allocated_size)
{
        struct hclge_umv_spc_alc_cmd *req;
        struct hclge_desc desc;
        int ret;

        req = (struct hclge_umv_spc_alc_cmd *)desc.data;
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);

        req->space_size = cpu_to_le32(space_size);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
                        ret);
                return ret;
        }

        *allocated_size = le32_to_cpu(desc.data[1]);

        return 0;
}
static int hclge_init_umv_space(struct hclge_dev *hdev)
{
        u16 allocated_size = 0;
        int ret;

        ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
        if (ret)
                return ret;

        if (allocated_size < hdev->wanted_umv_size)
                dev_warn(&hdev->pdev->dev,
                         "failed to alloc umv space, want %u, get %u\n",
                         hdev->wanted_umv_size, allocated_size);

        hdev->max_umv_size = allocated_size;
        hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
        hdev->share_umv_size = hdev->priv_umv_size +
                        hdev->max_umv_size % (hdev->num_alloc_vport + 1);

        if (hdev->ae_dev->dev_specs.mc_mac_size)
                set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);

        return 0;
}
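/* Example of the split above: with allocated_size = 256 and
 * num_alloc_vport = 7, each of the 8 quotas is 256 / 8 = 32 entries;
 * the shared pool gets one quota plus the remainder, 32 + 256 % 8 = 32.
 */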
static void hclge_reset_umv_space(struct hclge_dev *hdev)
{
        struct hclge_vport *vport;
        int i;

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                vport = &hdev->vport[i];
                vport->used_umv_num = 0;
        }

        mutex_lock(&hdev->vport_lock);
        hdev->share_umv_size = hdev->priv_umv_size +
                        hdev->max_umv_size % (hdev->num_alloc_vport + 1);
        mutex_unlock(&hdev->vport_lock);

        hdev->used_mc_mac_num = 0;
}
static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
{
        struct hclge_dev *hdev = vport->back;
        bool is_full;

        if (need_lock)
                mutex_lock(&hdev->vport_lock);

        is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
                   hdev->share_umv_size == 0);

        if (need_lock)
                mutex_unlock(&hdev->vport_lock);

        return is_full;
}
static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
        struct hclge_dev *hdev = vport->back;

        if (is_free) {
                if (vport->used_umv_num > hdev->priv_umv_size)
                        hdev->share_umv_size++;

                if (vport->used_umv_num > 0)
                        vport->used_umv_num--;
        } else {
                if (vport->used_umv_num >= hdev->priv_umv_size &&
                    hdev->share_umv_size > 0)
                        hdev->share_umv_size--;
                vport->used_umv_num++;
        }
}
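/* Accounting rule used by the two helpers above: a vport consumes its
 * private quota first and only then borrows from the shared pool, so on
 * free the shared pool is refilled first. Both counters rely on
 * hdev->vport_lock; hclge_update_umv_space() expects its caller to hold it.
 */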
static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
                                                  const u8 *mac_addr)
{
        struct hclge_mac_node *mac_node, *tmp;

        list_for_each_entry_safe(mac_node, tmp, list, node)
                if (ether_addr_equal(mac_addr, mac_node->mac_addr))
                        return mac_node;

        return NULL;
}
static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
                                  enum HCLGE_MAC_NODE_STATE state)
{
        switch (state) {
        /* from set_rx_mode or tmp_add_list */
        case HCLGE_MAC_TO_ADD:
                if (mac_node->state == HCLGE_MAC_TO_DEL)
                        mac_node->state = HCLGE_MAC_ACTIVE;
                break;
        /* only from set_rx_mode */
        case HCLGE_MAC_TO_DEL:
                if (mac_node->state == HCLGE_MAC_TO_ADD) {
                        list_del(&mac_node->node);
                        kfree(mac_node);
                } else {
                        mac_node->state = HCLGE_MAC_TO_DEL;
                }
                break;
        /* only from tmp_add_list, the mac_node->state won't be
         * ACTIVE
         */
        case HCLGE_MAC_ACTIVE:
                if (mac_node->state == HCLGE_MAC_TO_ADD)
                        mac_node->state = HCLGE_MAC_ACTIVE;
                break;
        }
}
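/* Resulting transitions (requested state x current node state):
 *   TO_ADD onto TO_DEL -> ACTIVE (pending delete cancelled, still in hw)
 *   TO_DEL onto TO_ADD -> node freed (the address never reached hw)
 *   TO_DEL onto others -> TO_DEL
 *   ACTIVE onto TO_ADD -> ACTIVE (hw write confirmed by service task)
 * any other combination leaves the node state unchanged.
 */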
int hclge_update_mac_list(struct hclge_vport *vport,
                          enum HCLGE_MAC_NODE_STATE state,
                          enum HCLGE_MAC_ADDR_TYPE mac_type,
                          const unsigned char *addr)
{
        char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
        struct hclge_dev *hdev = vport->back;
        struct hclge_mac_node *mac_node;
        struct list_head *list;

        list = (mac_type == HCLGE_MAC_ADDR_UC) ?
               &vport->uc_mac_list : &vport->mc_mac_list;

        spin_lock_bh(&vport->mac_list_lock);

        /* if the mac addr is already in the mac list, no need to add a new
         * one into it, just check the mac addr state, convert it to a new
         * state, or just remove it, or do nothing.
         */
        mac_node = hclge_find_mac_node(list, addr);
        if (mac_node) {
                hclge_update_mac_node(mac_node, state);
                spin_unlock_bh(&vport->mac_list_lock);
                set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
                return 0;
        }

        /* if this address is never added, unnecessary to delete */
        if (state == HCLGE_MAC_TO_DEL) {
                spin_unlock_bh(&vport->mac_list_lock);
                hnae3_format_mac_addr(format_mac_addr, addr);
                dev_err(&hdev->pdev->dev,
                        "failed to delete address %s from mac list\n",
                        format_mac_addr);
                return -ENOENT;
        }

        mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
        if (!mac_node) {
                spin_unlock_bh(&vport->mac_list_lock);
                return -ENOMEM;
        }

        set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

        mac_node->state = state;
        ether_addr_copy(mac_node->mac_addr, addr);
        list_add_tail(&mac_node->node, list);

        spin_unlock_bh(&vport->mac_list_lock);

        return 0;
}
static int hclge_add_uc_addr(struct hnae3_handle *handle,
                             const unsigned char *addr)
{
        struct hclge_vport *vport = hclge_get_vport(handle);

        return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
                                     addr);
}
int hclge_add_uc_addr_common(struct hclge_vport *vport,
                             const unsigned char *addr)
{
        char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
        struct hclge_dev *hdev = vport->back;
        struct hclge_mac_vlan_tbl_entry_cmd req;
        struct hclge_desc desc;
        u16 egress_port = 0;
        int ret;

        /* mac addr check */
        if (is_zero_ether_addr(addr) ||
            is_broadcast_ether_addr(addr) ||
            is_multicast_ether_addr(addr)) {
                hnae3_format_mac_addr(format_mac_addr, addr);
                dev_err(&hdev->pdev->dev,
                        "Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n",
                        format_mac_addr, is_zero_ether_addr(addr),
                        is_broadcast_ether_addr(addr),
                        is_multicast_ether_addr(addr));
                return -EINVAL;
        }

        memset(&req, 0, sizeof(req));

        hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
                        HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

        req.egress_port = cpu_to_le16(egress_port);

        hclge_prepare_mac_addr(&req, addr, false);

        /* Lookup the mac address in the mac_vlan table, and add
         * it if the entry is inexistent. Repeated unicast entry
         * is not allowed in the mac vlan table.
         */
        ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
        if (ret == -ENOENT) {
                mutex_lock(&hdev->vport_lock);
                if (!hclge_is_umv_space_full(vport, false)) {
                        ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
                        if (!ret)
                                hclge_update_umv_space(vport, false);
                        mutex_unlock(&hdev->vport_lock);
                        return ret;
                }
                mutex_unlock(&hdev->vport_lock);

                if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
                        dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
                                hdev->priv_umv_size);

                return -ENOSPC;
        }

        /* check if we just hit the duplicate */
        if (!ret)
                return -EEXIST;

        return ret;
}
static int hclge_rm_uc_addr(struct hnae3_handle *handle,
                            const unsigned char *addr)
{
        struct hclge_vport *vport = hclge_get_vport(handle);

        return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
                                     addr);
}
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
                            const unsigned char *addr)
{
        char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
        struct hclge_dev *hdev = vport->back;
        struct hclge_mac_vlan_tbl_entry_cmd req;
        int ret;

        /* mac addr check */
        if (is_zero_ether_addr(addr) ||
            is_broadcast_ether_addr(addr) ||
            is_multicast_ether_addr(addr)) {
                hnae3_format_mac_addr(format_mac_addr, addr);
                dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n",
                        format_mac_addr);
                return -EINVAL;
        }

        memset(&req, 0, sizeof(req));
        hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
        hclge_prepare_mac_addr(&req, addr, false);
        ret = hclge_remove_mac_vlan_tbl(vport, &req);
        if (!ret || ret == -ENOENT) {
                mutex_lock(&hdev->vport_lock);
                hclge_update_umv_space(vport, true);
                mutex_unlock(&hdev->vport_lock);
        }

        return ret;
}
static int hclge_add_mc_addr(struct hnae3_handle *handle,
                             const unsigned char *addr)
{
        struct hclge_vport *vport = hclge_get_vport(handle);

        return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
                                     addr);
}
int hclge_add_mc_addr_common(struct hclge_vport *vport,
                             const unsigned char *addr)
{
        char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
        struct hclge_dev *hdev = vport->back;
        struct hclge_mac_vlan_tbl_entry_cmd req;
        struct hclge_desc desc[3];
        bool is_new_addr = false;
        int status;

        /* mac addr check */
        if (!is_multicast_ether_addr(addr)) {
                hnae3_format_mac_addr(format_mac_addr, addr);
                dev_err(&hdev->pdev->dev,
                        "Add mc mac err! invalid mac:%s.\n",
                        format_mac_addr);
                return -EINVAL;
        }
        memset(&req, 0, sizeof(req));
        hclge_prepare_mac_addr(&req, addr, true);
        status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
        if (status) {
                if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
                    hdev->used_mc_mac_num >=
                    hdev->ae_dev->dev_specs.mc_mac_size)
                        goto err_no_space;

                is_new_addr = true;

                /* This mac addr do not exist, add new entry for it */
                memset(desc[0].data, 0, sizeof(desc[0].data));
                memset(desc[1].data, 0, sizeof(desc[0].data));
                memset(desc[2].data, 0, sizeof(desc[0].data));
        }
        status = hclge_update_desc_vfid(desc, vport->vport_id, false);
        if (status)
                return status;
        status = hclge_add_mac_vlan_tbl(vport, &req, desc);
        if (status == -ENOSPC)
                goto err_no_space;
        else if (!status && is_new_addr)
                hdev->used_mc_mac_num++;

        return status;

err_no_space:
        /* if already overflow, not to print each time */
        if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) {
                vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
                dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
        }

        return -ENOSPC;
}
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
                            const unsigned char *addr)
{
        struct hclge_vport *vport = hclge_get_vport(handle);

        return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
                                     addr);
}
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
                            const unsigned char *addr)
{
        char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
        struct hclge_dev *hdev = vport->back;
        struct hclge_mac_vlan_tbl_entry_cmd req;
        enum hclge_comm_cmd_status status;
        struct hclge_desc desc[3];

        /* mac addr check */
        if (!is_multicast_ether_addr(addr)) {
                hnae3_format_mac_addr(format_mac_addr, addr);
                dev_dbg(&hdev->pdev->dev,
                        "Remove mc mac err! invalid mac:%s.\n",
                        format_mac_addr);
                return -EINVAL;
        }

        memset(&req, 0, sizeof(req));
        hclge_prepare_mac_addr(&req, addr, true);
        status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
        if (!status) {
                /* This mac addr exist, remove this handle's VFID for it */
                status = hclge_update_desc_vfid(desc, vport->vport_id, true);
                if (status)
                        return status;

                if (hclge_is_all_function_id_zero(desc)) {
                        /* All the vfid is zero, so need to delete this entry */
                        status = hclge_remove_mac_vlan_tbl(vport, &req);
                        if (!status)
                                hdev->used_mc_mac_num--;
                } else {
                        /* Not all the vfid is zero, update the vfid */
                        status = hclge_add_mac_vlan_tbl(vport, &req, desc);
                }
        } else if (status == -ENOENT) {
                status = 0;
        }

        return status;
}
static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
                                      struct list_head *list,
                                      enum HCLGE_MAC_ADDR_TYPE mac_type)
{
        int (*sync)(struct hclge_vport *vport, const unsigned char *addr);
        struct hclge_mac_node *mac_node, *tmp;
        int ret;

        if (mac_type == HCLGE_MAC_ADDR_UC)
                sync = hclge_add_uc_addr_common;
        else
                sync = hclge_add_mc_addr_common;

        list_for_each_entry_safe(mac_node, tmp, list, node) {
                ret = sync(vport, mac_node->mac_addr);
                if (!ret) {
                        mac_node->state = HCLGE_MAC_ACTIVE;
                } else {
                        set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
                                &vport->state);

                        /* If one unicast mac address is existing in hardware,
                         * we need to try whether other unicast mac addresses
                         * are new addresses that can be added.
                         * Multicast mac address can be reusable, even though
                         * there is no space to add new multicast mac address,
                         * we should check whether other mac addresses are
                         * existing in hardware for reuse.
                         */
                        if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) ||
                            (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC))
                                break;
                }
        }
}
static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
                                        struct list_head *list,
                                        enum HCLGE_MAC_ADDR_TYPE mac_type)
{
        int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
        struct hclge_mac_node *mac_node, *tmp;
        int ret;

        if (mac_type == HCLGE_MAC_ADDR_UC)
                unsync = hclge_rm_uc_addr_common;
        else
                unsync = hclge_rm_mc_addr_common;

        list_for_each_entry_safe(mac_node, tmp, list, node) {
                ret = unsync(vport, mac_node->mac_addr);
                if (!ret || ret == -ENOENT) {
                        list_del(&mac_node->node);
                        kfree(mac_node);
                } else {
                        set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
                                &vport->state);
                        break;
                }
        }
}
static bool hclge_sync_from_add_list(struct list_head *add_list,
                                     struct list_head *mac_list)
{
        struct hclge_mac_node *mac_node, *tmp, *new_node;
        bool all_added = true;

        list_for_each_entry_safe(mac_node, tmp, add_list, node) {
                if (mac_node->state == HCLGE_MAC_TO_ADD)
                        all_added = false;

                /* if the mac address from tmp_add_list is not in the
                 * uc/mc_mac_list, it means have received a TO_DEL request
                 * during the time window of adding the mac address into mac
                 * table. if mac_node state is ACTIVE, then change it to TO_DEL,
                 * then it will be removed at next time. else it must be TO_ADD,
                 * this address hasn't been added into mac table,
                 * so just remove the mac node.
                 */
                new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
                if (new_node) {
                        hclge_update_mac_node(new_node, mac_node->state);
                        list_del(&mac_node->node);
                        kfree(mac_node);
                } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
                        mac_node->state = HCLGE_MAC_TO_DEL;
                        list_move_tail(&mac_node->node, mac_list);
                } else {
                        list_del(&mac_node->node);
                        kfree(mac_node);
                }
        }

        return all_added;
}
static void hclge_sync_from_del_list(struct list_head *del_list,
                                     struct list_head *mac_list)
{
        struct hclge_mac_node *mac_node, *tmp, *new_node;

        list_for_each_entry_safe(mac_node, tmp, del_list, node) {
                new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
                if (new_node) {
                        /* If the mac addr exists in the mac list, it means
                         * received a new TO_ADD request during the time window
                         * of configuring the mac address. For the mac node
                         * state is TO_ADD, and the address is already in the
                         * hardware (due to delete fail), so we just need
                         * to change the mac node state to ACTIVE.
                         */
                        new_node->state = HCLGE_MAC_ACTIVE;
                        list_del(&mac_node->node);
                        kfree(mac_node);
                } else {
                        list_move_tail(&mac_node->node, mac_list);
                }
        }
}
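/* The two sync_from_* helpers above reconcile the temporary lists with
 * uc/mc_mac_list after hardware access: set_rx_mode may have changed the
 * real list while the service task held no lock, so every leftover tmp
 * node is either merged back, re-queued for deletion, or freed.
 */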
static void hclge_update_overflow_flags(struct hclge_vport *vport,
                                        enum HCLGE_MAC_ADDR_TYPE mac_type,
                                        bool is_all_added)
{
        if (mac_type == HCLGE_MAC_ADDR_UC) {
                if (is_all_added)
                        vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
                else
                        vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
        } else {
                if (is_all_added)
                        vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
                else
                        vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
        }
}
static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
                                       enum HCLGE_MAC_ADDR_TYPE mac_type)
{
        struct hclge_mac_node *mac_node, *tmp, *new_node;
        struct list_head tmp_add_list, tmp_del_list;
        struct list_head *list;
        bool all_added;

        INIT_LIST_HEAD(&tmp_add_list);
        INIT_LIST_HEAD(&tmp_del_list);

        /* move the mac addr to the tmp_add_list and tmp_del_list, then
         * we can add/delete these mac addr outside the spin lock
         */
        list = (mac_type == HCLGE_MAC_ADDR_UC) ?
                &vport->uc_mac_list : &vport->mc_mac_list;

        spin_lock_bh(&vport->mac_list_lock);

        list_for_each_entry_safe(mac_node, tmp, list, node) {
                switch (mac_node->state) {
                case HCLGE_MAC_TO_DEL:
                        list_move_tail(&mac_node->node, &tmp_del_list);
                        break;
                case HCLGE_MAC_TO_ADD:
                        new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
                        if (!new_node)
                                goto stop_traverse;
                        ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
                        new_node->state = mac_node->state;
                        list_add_tail(&new_node->node, &tmp_add_list);
                        break;
                default:
                        break;
                }
        }

stop_traverse:
        spin_unlock_bh(&vport->mac_list_lock);

        /* delete first, in order to get max mac table space for adding */
        hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
        hclge_sync_vport_mac_list(vport, &tmp_add_list, mac_type);

        /* if some mac addresses were added/deleted fail, move back to the
         * mac_list, and retry at next time.
         */
        spin_lock_bh(&vport->mac_list_lock);

        hclge_sync_from_del_list(&tmp_del_list, list);
        all_added = hclge_sync_from_add_list(&tmp_add_list, list);

        spin_unlock_bh(&vport->mac_list_lock);

        hclge_update_overflow_flags(vport, mac_type, all_added);
}
static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
{
        struct hclge_dev *hdev = vport->back;

        if (test_bit(vport->vport_id, hdev->vport_config_block))
                return false;

        if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
                return true;

        return false;
}
static void hclge_sync_mac_table(struct hclge_dev *hdev)
{
        int i;

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                struct hclge_vport *vport = &hdev->vport[i];

                if (!hclge_need_sync_mac_table(vport))
                        continue;

                hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
                hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
        }
}
static void hclge_build_del_list(struct list_head *list,
                                 bool is_del_list,
                                 struct list_head *tmp_del_list)
{
        struct hclge_mac_node *mac_cfg, *tmp;

        list_for_each_entry_safe(mac_cfg, tmp, list, node) {
                switch (mac_cfg->state) {
                case HCLGE_MAC_TO_DEL:
                case HCLGE_MAC_ACTIVE:
                        list_move_tail(&mac_cfg->node, tmp_del_list);
                        break;
                case HCLGE_MAC_TO_ADD:
                        if (is_del_list) {
                                list_del(&mac_cfg->node);
                                kfree(mac_cfg);
                        }
                        break;
                }
        }
}
static void hclge_unsync_del_list(struct hclge_vport *vport,
                                  int (*unsync)(struct hclge_vport *vport,
                                                const unsigned char *addr),
                                  bool is_del_list,
                                  struct list_head *tmp_del_list)
{
        struct hclge_mac_node *mac_cfg, *tmp;
        int ret;

        list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
                ret = unsync(vport, mac_cfg->mac_addr);
                if (!ret || ret == -ENOENT) {
                        /* clear all mac addr from hardware, but remain these
                         * mac addr in the mac list, and restore them after
                         * vf reset finished.
                         */
                        if (!is_del_list &&
                            mac_cfg->state == HCLGE_MAC_ACTIVE) {
                                mac_cfg->state = HCLGE_MAC_TO_ADD;
                        } else {
                                list_del(&mac_cfg->node);
                                kfree(mac_cfg);
                        }
                } else if (is_del_list) {
                        mac_cfg->state = HCLGE_MAC_TO_DEL;
                }
        }
}
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
                                  enum HCLGE_MAC_ADDR_TYPE mac_type)
{
        int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
        struct hclge_dev *hdev = vport->back;
        struct list_head tmp_del_list, *list;

        if (mac_type == HCLGE_MAC_ADDR_UC) {
                list = &vport->uc_mac_list;
                unsync = hclge_rm_uc_addr_common;
        } else {
                list = &vport->mc_mac_list;
                unsync = hclge_rm_mc_addr_common;
        }

        INIT_LIST_HEAD(&tmp_del_list);

        if (!is_del_list)
                set_bit(vport->vport_id, hdev->vport_config_block);

        spin_lock_bh(&vport->mac_list_lock);

        hclge_build_del_list(list, is_del_list, &tmp_del_list);

        spin_unlock_bh(&vport->mac_list_lock);

        hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);

        spin_lock_bh(&vport->mac_list_lock);

        hclge_sync_from_del_list(&tmp_del_list, list);

        spin_unlock_bh(&vport->mac_list_lock);
}
/* remove all mac address when uninitialize */
static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
                                        enum HCLGE_MAC_ADDR_TYPE mac_type)
{
        struct hclge_mac_node *mac_node, *tmp;
        struct hclge_dev *hdev = vport->back;
        struct list_head tmp_del_list, *list;

        INIT_LIST_HEAD(&tmp_del_list);

        list = (mac_type == HCLGE_MAC_ADDR_UC) ?
                &vport->uc_mac_list : &vport->mc_mac_list;

        spin_lock_bh(&vport->mac_list_lock);

        list_for_each_entry_safe(mac_node, tmp, list, node) {
                switch (mac_node->state) {
                case HCLGE_MAC_TO_DEL:
                case HCLGE_MAC_ACTIVE:
                        list_move_tail(&mac_node->node, &tmp_del_list);
                        break;
                case HCLGE_MAC_TO_ADD:
                        list_del(&mac_node->node);
                        kfree(mac_node);
                        break;
                }
        }

        spin_unlock_bh(&vport->mac_list_lock);

        hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);

        if (!list_empty(&tmp_del_list))
                dev_warn(&hdev->pdev->dev,
                         "uninit %s mac list for vport %u not completely.\n",
                         mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
                         vport->vport_id);

        list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
                list_del(&mac_node->node);
                kfree(mac_node);
        }
}
static void hclge_uninit_mac_table(struct hclge_dev *hdev)
{
        struct hclge_vport *vport;
        int i;

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                vport = &hdev->vport[i];
                hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
                hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
        }
}
static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
                                              u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD             0
#define HCLGE_ETHERTYPE_ALREADY_ADD             1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
#define HCLGE_ETHERTYPE_KEY_CONFLICT            3

        int return_status = 0;

        if (cmdq_resp) {
                dev_err(&hdev->pdev->dev,
                        "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
                        cmdq_resp);
                return -EIO;
        }

        switch (resp_code) {
        case HCLGE_ETHERTYPE_SUCCESS_ADD:
        case HCLGE_ETHERTYPE_ALREADY_ADD:
                return_status = 0;
                break;
        case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
                dev_err(&hdev->pdev->dev,
                        "add mac ethertype failed for manager table overflow.\n");
                return_status = -EIO;
                break;
        case HCLGE_ETHERTYPE_KEY_CONFLICT:
                dev_err(&hdev->pdev->dev,
                        "add mac ethertype failed for key conflict.\n");
                return_status = -EIO;
                break;
        default:
                dev_err(&hdev->pdev->dev,
                        "add mac ethertype failed for undefined, code=%u.\n",
                        resp_code);
                return_status = -EIO;
        }

        return return_status;
}
static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
                            u8 *mac_addr)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
        struct hclge_dev *hdev = vport->back;

        vport = hclge_get_vf_vport(hdev, vf);
        if (!vport)
                return -EINVAL;

        hnae3_format_mac_addr(format_mac_addr, mac_addr);
        if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
                dev_info(&hdev->pdev->dev,
                         "Specified MAC(=%s) is same as before, no change committed!\n",
                         format_mac_addr);
                return 0;
        }

        ether_addr_copy(vport->vf_info.mac, mac_addr);

        /* there is a timewindow for PF to know VF unalive, it may
         * cause send mailbox fail, but it doesn't matter, VF will
         * query it when reinit.
         */
        if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
                dev_info(&hdev->pdev->dev,
                         "MAC of VF %d has been set to %s, and it will be reinitialized!\n",
                         vf, format_mac_addr);
                (void)hclge_inform_reset_assert_to_vf(vport);
                return 0;
        }

        dev_info(&hdev->pdev->dev,
                 "MAC of VF %d has been set to %s, will be active after VF reset\n",
                 vf, format_mac_addr);
        return 0;
}
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
                             const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
        struct hclge_desc desc;
        u8 resp_code;
        u16 retval;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
        memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "add mac ethertype failed for cmd_send, ret =%d.\n",
                        ret);
                return ret;
        }

        resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
        retval = le16_to_cpu(desc.retval);

        return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}
static int init_mgr_tbl(struct hclge_dev *hdev)
{
        int ret;
        int i;

        for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
                ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "add mac ethertype failed, ret =%d.\n",
                                ret);
                        return ret;
                }
        }

        return 0;
}
static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
                                       const u8 *old_addr, const u8 *new_addr)
{
        struct list_head *list = &vport->uc_mac_list;
        struct hclge_mac_node *old_node, *new_node;

        new_node = hclge_find_mac_node(list, new_addr);
        if (!new_node) {
                new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
                if (!new_node)
                        return -ENOMEM;

                new_node->state = HCLGE_MAC_TO_ADD;
                ether_addr_copy(new_node->mac_addr, new_addr);
                list_add(&new_node->node, list);
        } else {
                if (new_node->state == HCLGE_MAC_TO_DEL)
                        new_node->state = HCLGE_MAC_ACTIVE;

                /* make sure the new addr is in the list head, avoid dev
                 * addr may be not re-added into mac table for the umv space
                 * limitation after global/imp reset which will clear mac
                 * table by hardware.
                 */
                list_move(&new_node->node, list);
        }

        if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
                old_node = hclge_find_mac_node(list, old_addr);
                if (old_node) {
                        if (old_node->state == HCLGE_MAC_TO_ADD) {
                                list_del(&old_node->node);
                                kfree(old_node);
                        } else {
                                old_node->state = HCLGE_MAC_TO_DEL;
                        }
                }
        }

        set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

        return 0;
}
static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
                              bool is_first)
{
        const unsigned char *new_addr = (const unsigned char *)p;
        struct hclge_vport *vport = hclge_get_vport(handle);
        char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
        struct hclge_dev *hdev = vport->back;
        unsigned char *old_addr = NULL;
        int ret;

        /* mac addr check */
        if (is_zero_ether_addr(new_addr) ||
            is_broadcast_ether_addr(new_addr) ||
            is_multicast_ether_addr(new_addr)) {
                hnae3_format_mac_addr(format_mac_addr, new_addr);
                dev_err(&hdev->pdev->dev,
                        "change uc mac err! invalid mac: %s.\n",
                        format_mac_addr);
                return -EINVAL;
        }

        ret = hclge_pause_addr_cfg(hdev, new_addr);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "failed to configure mac pause address, ret = %d\n",
                        ret);
                return ret;
        }

        if (!is_first)
                old_addr = hdev->hw.mac.mac_addr;

        spin_lock_bh(&vport->mac_list_lock);
        ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
        if (ret) {
                hnae3_format_mac_addr(format_mac_addr, new_addr);
                dev_err(&hdev->pdev->dev,
                        "failed to change the mac addr:%s, ret = %d\n",
                        format_mac_addr, ret);
                spin_unlock_bh(&vport->mac_list_lock);

                if (!is_first)
                        hclge_pause_addr_cfg(hdev, old_addr);

                return ret;
        }
        /* we must update dev addr with spin lock protect, preventing dev addr
         * being removed by set_rx_mode path.
         */
        ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
        spin_unlock_bh(&vport->mac_list_lock);

        hclge_task_schedule(hdev, 0);

        return 0;
}
static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);

        if (!hnae3_dev_phy_imp_supported(hdev))
                return -EOPNOTSUPP;

        switch (cmd) {
        case SIOCGMIIPHY:
                data->phy_id = hdev->hw.mac.phy_addr;
                /* this command reads phy id and register at the same time */
                fallthrough;
        case SIOCGMIIREG:
                data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
                return 0;

        case SIOCSMIIREG:
                return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
        default:
                return -EOPNOTSUPP;
        }
}
static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
                          int cmd)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        switch (cmd) {
        case SIOCGHWTSTAMP:
                return hclge_ptp_get_cfg(hdev, ifr);
        case SIOCSHWTSTAMP:
                return hclge_ptp_set_cfg(hdev, ifr);
        default:
                if (!hdev->hw.mac.phydev)
                        return hclge_mii_ioctl(hdev, ifr, cmd);
        }

        return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}
static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
                                             bool bypass_en)
{
        struct hclge_port_vlan_filter_bypass_cmd *req;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
        req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
        req->vf_id = vf_id;
        hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
                      bypass_en ? 1 : 0);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
                        vf_id, ret);

        return ret;
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
                                      u8 fe_type, bool filter_en, u8 vf_id)
{
        struct hclge_vlan_filter_ctrl_cmd *req;
        struct hclge_desc desc;
        int ret;

        /* read current vlan filter parameter */
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
        req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
        req->vlan_type = vlan_type;
        req->vf_id = vf_id;

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n",
                        vf_id, ret);
                return ret;
        }

        /* modify and write new config parameter */
        hclge_comm_cmd_reuse_desc(&desc, false);
        req->vlan_fe = filter_en ?
                        (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n",
                        vf_id, ret);

        return ret;
}
static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
{
        struct hclge_dev *hdev = vport->back;
        struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
        int ret;

        if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
                return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
                                                  enable, vport->vport_id);

        ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
                                         HCLGE_FILTER_FE_EGRESS, enable,
                                         vport->vport_id);
        if (ret)
                return ret;

        if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
                ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
                                                        !enable);
        } else if (!vport->vport_id) {
                if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
                        enable = false;

                ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
                                                 HCLGE_FILTER_FE_INGRESS,
                                                 enable, 0);
        }

        return ret;
}
static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
{
        struct hnae3_handle *handle = &vport->nic;
        struct hclge_vport_vlan_cfg *vlan, *tmp;
        struct hclge_dev *hdev = vport->back;

        if (vport->vport_id) {
                if (vport->port_base_vlan_cfg.state !=
                        HNAE3_PORT_BASE_VLAN_DISABLE)
                        return true;

                if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
                        return false;
        } else if (handle->netdev_flags & HNAE3_USER_UPE) {
                return false;
        }

        if (!vport->req_vlan_fltr_en)
                return false;

        /* compatible with former device, always enable vlan filter */
        if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
                return true;

        list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
                if (vlan->vlan_id != 0)
                        return true;

        return false;
}
int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
{
        struct hclge_dev *hdev = vport->back;
        bool need_en;
        int ret;

        mutex_lock(&hdev->vport_lock);

        vport->req_vlan_fltr_en = request_en;

        need_en = hclge_need_enable_vport_vlan_filter(vport);
        if (need_en == vport->cur_vlan_fltr_en) {
                mutex_unlock(&hdev->vport_lock);
                return 0;
        }

        ret = hclge_set_vport_vlan_filter(vport, need_en);
        if (ret) {
                mutex_unlock(&hdev->vport_lock);
                return ret;
        }

        vport->cur_vlan_fltr_en = need_en;

        mutex_unlock(&hdev->vport_lock);

        return 0;
}
static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
        struct hclge_vport *vport = hclge_get_vport(handle);

        return hclge_enable_vport_vlan_filter(vport, enable);
}
static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
                                        bool is_kill, u16 vlan,
                                        struct hclge_desc *desc)
{
        struct hclge_vlan_filter_vf_cfg_cmd *req0;
        struct hclge_vlan_filter_vf_cfg_cmd *req1;
        u8 vf_byte_val;
        u8 vf_byte_off;
        int ret;

        hclge_cmd_setup_basic_desc(&desc[0],
                                   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
        hclge_cmd_setup_basic_desc(&desc[1],
                                   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

        desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);

        vf_byte_off = vfid / 8;
        vf_byte_val = 1 << (vfid % 8);

        req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
        req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

        req0->vlan_id = cpu_to_le16(vlan);
        req0->vlan_cfg = is_kill;

        if (vf_byte_off < HCLGE_MAX_VF_BYTES)
                req0->vf_bitmap[vf_byte_off] = vf_byte_val;
        else
                req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

        ret = hclge_cmd_send(&hdev->hw, desc, 2);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Send vf vlan command fail, ret =%d.\n",
                        ret);
                return ret;
        }

        return 0;
}
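/* The target function is addressed as one bit of a bitmap spanning both
 * descriptors: vfid / 8 picks the byte, vfid % 8 the bit, and ids beyond
 * HCLGE_MAX_VF_BYTES * 8 spill over from desc[0] into desc[1].
 */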
static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
                                          bool is_kill, struct hclge_desc *desc)
{
        struct hclge_vlan_filter_vf_cfg_cmd *req;

        req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
        if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY  2
                if (!req->resp_code || req->resp_code == 1)
                        return 0;

                if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
                        set_bit(vfid, hdev->vf_vlan_full);
                        dev_warn(&hdev->pdev->dev,
                                 "vf vlan table is full, vf vlan filter is disabled\n");
                        return 0;
                }

                dev_err(&hdev->pdev->dev,
                        "Add vf vlan filter fail, ret =%u.\n",
                        req->resp_code);
        } else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND      1
                if (!req->resp_code)
                        return 0;

                /* vf vlan filter is disabled when vf vlan table is full,
                 * then new vlan id will not be added into vf vlan table.
                 * Just return 0 without warning, avoid massive verbose
                 * print logs when unload.
                 */
                if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
                        return 0;

                dev_err(&hdev->pdev->dev,
                        "Kill vf vlan filter fail, ret =%u.\n",
                        req->resp_code);
        }

        return -EIO;
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
                                    bool is_kill, u16 vlan)
{
        struct hclge_vport *vport = &hdev->vport[vfid];
        struct hclge_desc desc[2];
        int ret;

        /* if vf vlan table is full, firmware will close vf vlan filter, it
         * is unable and unnecessary to add new vlan id to vf vlan filter.
         * If spoof check is enable, and vf vlan is full, it shouldn't add
         * new vlan, because tx packets with these vlan id will be dropped.
         */
        if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
                if (vport->vf_info.spoofchk && vlan) {
                        dev_err(&hdev->pdev->dev,
                                "Can't add vlan due to spoof check is on and vf vlan table is full\n");
                        return -EPERM;
                }
                return 0;
        }

        ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
        if (ret)
                return ret;

        return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
}
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
                                      u16 vlan_id, bool is_kill)
{
        struct hclge_vlan_filter_pf_cfg_cmd *req;
        struct hclge_desc desc;
        u8 vlan_offset_byte_val;
        u8 vlan_offset_byte;
        u8 vlan_offset_160;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

        vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
        vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
                           HCLGE_VLAN_BYTE_SIZE;
        vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);

        req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
        req->vlan_offset = vlan_offset_160;
        req->vlan_cfg = is_kill;
        req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "port vlan command, send fail, ret =%d.\n", ret);
        return ret;
}
static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id,
                                        u16 vlan_id, bool is_kill)
{
        /* vlan 0 may be added twice when 8021q module is enabled */
        if (!is_kill && !vlan_id &&
            test_bit(vport_id, hdev->vlan_table[vlan_id]))
                return false;

        if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
                dev_warn(&hdev->pdev->dev,
                         "Add port vlan failed, vport %u is already in vlan %u\n",
                         vport_id, vlan_id);
                return false;
        }

        if (is_kill &&
            !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
                dev_warn(&hdev->pdev->dev,
                         "Delete port vlan failed, vport %u is not in vlan %u\n",
                         vport_id, vlan_id);
                return false;
        }

        return true;
}
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
                                    u16 vport_id, u16 vlan_id,
                                    bool is_kill)
{
        u16 vport_idx, vport_num = 0;
        int ret;

        if (is_kill && !vlan_id)
                return 0;

        if (vlan_id >= VLAN_N_VID)
                return -EINVAL;

        ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Set %u vport vlan filter config fail, ret =%d.\n",
                        vport_id, ret);
                return ret;
        }

        if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill))
                return 0;

        for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
                vport_num++;

        if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
                ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
                                                 is_kill);

        return ret;
}
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
        struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
        struct hclge_vport_vtag_tx_cfg_cmd *req;
        struct hclge_dev *hdev = vport->back;
        struct hclge_desc desc;
        u16 bmap_index;
        int status;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

        req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
        req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
        req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
        hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
                      vcfg->accept_tag1 ? 1 : 0);
        hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
                      vcfg->accept_untag1 ? 1 : 0);
        hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
                      vcfg->accept_tag2 ? 1 : 0);
        hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
                      vcfg->accept_untag2 ? 1 : 0);
        hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
                      vcfg->insert_tag1_en ? 1 : 0);
        hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
                      vcfg->insert_tag2_en ? 1 : 0);
        hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
                      vcfg->tag_shift_mode_en ? 1 : 0);
        hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

        req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
        bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
                     HCLGE_VF_NUM_PER_BYTE;
        req->vf_bitmap[bmap_index] =
                1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

        status = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Send port txvlan cfg command fail, ret =%d\n",
                        status);

        return status;
}
static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
        struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
        struct hclge_vport_vtag_rx_cfg_cmd *req;
        struct hclge_dev *hdev = vport->back;
        struct hclge_desc desc;
        u16 bmap_index;
        int status;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

        req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
        hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
                      vcfg->strip_tag1_en ? 1 : 0);
        hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
                      vcfg->strip_tag2_en ? 1 : 0);
        hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
                      vcfg->vlan1_vlan_prionly ? 1 : 0);
        hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
                      vcfg->vlan2_vlan_prionly ? 1 : 0);
        hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
                      vcfg->strip_tag1_discard_en ? 1 : 0);
        hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
                      vcfg->strip_tag2_discard_en ? 1 : 0);

        req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
        bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
                     HCLGE_VF_NUM_PER_BYTE;
        req->vf_bitmap[bmap_index] =
                1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

        status = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Send port rxvlan cfg command fail, ret =%d\n",
                        status);

        return status;
}
static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
                                  u16 port_base_vlan_state,
                                  u16 vlan_tag, u8 qos)
{
        int ret;

        if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
                vport->txvlan_cfg.accept_tag1 = true;
                vport->txvlan_cfg.insert_tag1_en = false;
                vport->txvlan_cfg.default_tag1 = 0;
        } else {
                struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);

                vport->txvlan_cfg.accept_tag1 =
                        ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
                vport->txvlan_cfg.insert_tag1_en = true;
                vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
                                                 vlan_tag;
        }

        vport->txvlan_cfg.accept_untag1 = true;

        /* accept_tag2 and accept_untag2 are not supported on
         * pdev revision(0x20), new revision support them,
         * these two fields can not be configured by user.
         */
        vport->txvlan_cfg.accept_tag2 = true;
        vport->txvlan_cfg.accept_untag2 = true;
        vport->txvlan_cfg.insert_tag2_en = false;
        vport->txvlan_cfg.default_tag2 = 0;
        vport->txvlan_cfg.tag_shift_mode_en = true;

        if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
                vport->rxvlan_cfg.strip_tag1_en = false;
                vport->rxvlan_cfg.strip_tag2_en =
                                vport->rxvlan_cfg.rx_vlan_offload_en;
                vport->rxvlan_cfg.strip_tag2_discard_en = false;
        } else {
                vport->rxvlan_cfg.strip_tag1_en =
                                vport->rxvlan_cfg.rx_vlan_offload_en;
                vport->rxvlan_cfg.strip_tag2_en = true;
                vport->rxvlan_cfg.strip_tag2_discard_en = true;
        }

        vport->rxvlan_cfg.strip_tag1_discard_en = false;
        vport->rxvlan_cfg.vlan1_vlan_prionly = false;
        vport->rxvlan_cfg.vlan2_vlan_prionly = false;

        ret = hclge_set_vlan_tx_offload_cfg(vport);
        if (ret)
                return ret;

        return hclge_set_vlan_rx_offload_cfg(vport);
}
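/* Note on the rx settings above: with a port based VLAN enabled, tag2 is
 * stripped unconditionally and HCLGE_DISCARD_TAG2_EN_B is set, so that
 * stripped tag is dropped instead of being reported up the stack, while
 * tag1 stripping still follows the netdev's rx-vlan-offload state.
 */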
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
        struct hclge_rx_vlan_type_cfg_cmd *rx_req;
        struct hclge_tx_vlan_type_cfg_cmd *tx_req;
        struct hclge_desc desc;
        int status;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
        rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
        rx_req->ot_fst_vlan_type =
                cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
        rx_req->ot_sec_vlan_type =
                cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
        rx_req->in_fst_vlan_type =
                cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
        rx_req->in_sec_vlan_type =
                cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

        status = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (status) {
                dev_err(&hdev->pdev->dev,
                        "Send rxvlan protocol type command fail, ret =%d\n",
                        status);
                return status;
        }

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

        tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
        tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
        tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

        status = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Send txvlan protocol type command fail, ret =%d\n",
                        status);

        return status;
}
static int hclge_init_vlan_filter(struct hclge_dev *hdev)
{
        struct hclge_vport *vport;
        int ret;
        int i;

        if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
                return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
                                                  true, 0);

        /* for revision 0x21, vf vlan filter is per function */
        for (i = 0; i < hdev->num_alloc_vport; i++) {
                vport = &hdev->vport[i];
                ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
                                                 HCLGE_FILTER_FE_EGRESS, true,
                                                 vport->vport_id);
                if (ret)
                        return ret;
                vport->cur_vlan_fltr_en = true;
        }

        return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
                                          HCLGE_FILTER_FE_INGRESS, true, 0);
}
static int hclge_init_vlan_type(struct hclge_dev *hdev)
{
        hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q;
        hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q;
        hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q;
        hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q;
        hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q;
        hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q;

        return hclge_set_vlan_protocol_type(hdev);
}
static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev)
{
        struct hclge_port_base_vlan_config *cfg;
        struct hclge_vport *vport;
        int ret;
        int i;

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                vport = &hdev->vport[i];
                cfg = &vport->port_base_vlan_cfg;

                ret = hclge_vlan_offload_cfg(vport, cfg->state,
                                             cfg->vlan_info.vlan_tag,
                                             cfg->vlan_info.qos);
                if (ret)
                        return ret;
        }

        return 0;
}
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
        struct hnae3_handle *handle = &hdev->vport[0].nic;
        int ret;

        ret = hclge_init_vlan_filter(hdev);
        if (ret)
                return ret;

        ret = hclge_init_vlan_type(hdev);
        if (ret)
                return ret;

        ret = hclge_init_vport_vlan_offload(hdev);
        if (ret)
                return ret;

        return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
                                       bool writen_to_tbl)
{
        struct hclge_vport_vlan_cfg *vlan, *tmp;
        struct hclge_dev *hdev = vport->back;

        mutex_lock(&hdev->vport_lock);

        list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
                if (vlan->vlan_id == vlan_id) {
                        mutex_unlock(&hdev->vport_lock);
                        return;
                }
        }

        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
        if (!vlan) {
                mutex_unlock(&hdev->vport_lock);
                return;
        }

        vlan->hd_tbl_status = writen_to_tbl;
        vlan->vlan_id = vlan_id;

        list_add_tail(&vlan->node, &vport->vlan_list);
        mutex_unlock(&hdev->vport_lock);
}
static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
{
        struct hclge_vport_vlan_cfg *vlan, *tmp;
        struct hclge_dev *hdev = vport->back;
        int ret;

        mutex_lock(&hdev->vport_lock);

        list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
                if (!vlan->hd_tbl_status) {
                        ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
                                                       vport->vport_id,
                                                       vlan->vlan_id, false);
                        if (ret) {
                                dev_err(&hdev->pdev->dev,
                                        "restore vport vlan list failed, ret=%d\n",
                                        ret);

                                mutex_unlock(&hdev->vport_lock);
                                return ret;
                        }
                }
                vlan->hd_tbl_status = true;
        }

        mutex_unlock(&hdev->vport_lock);

        return 0;
}
static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
                                      bool is_write_tbl)
{
        struct hclge_vport_vlan_cfg *vlan, *tmp;
        struct hclge_dev *hdev = vport->back;

        mutex_lock(&hdev->vport_lock);

        list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
                if (vlan->vlan_id == vlan_id) {
                        if (is_write_tbl && vlan->hd_tbl_status)
                                hclge_set_vlan_filter_hw(hdev,
                                                         htons(ETH_P_8021Q),
                                                         vport->vport_id,
                                                         vlan_id,
                                                         true);

                        list_del(&vlan->node);
                        kfree(vlan);
                        break;
                }
        }

        mutex_unlock(&hdev->vport_lock);
}
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
{
        struct hclge_vport_vlan_cfg *vlan, *tmp;
        struct hclge_dev *hdev = vport->back;

        mutex_lock(&hdev->vport_lock);

        list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
                if (vlan->hd_tbl_status)
                        hclge_set_vlan_filter_hw(hdev,
                                                 htons(ETH_P_8021Q),
                                                 vport->vport_id,
                                                 vlan->vlan_id,
                                                 true);

                vlan->hd_tbl_status = false;
                if (is_del_list) {
                        list_del(&vlan->node);
                        kfree(vlan);
                }
        }
        clear_bit(vport->vport_id, hdev->vf_vlan_full);
        mutex_unlock(&hdev->vport_lock);
}
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
{
        struct hclge_vport_vlan_cfg *vlan, *tmp;
        struct hclge_vport *vport;
        int i;

        mutex_lock(&hdev->vport_lock);

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                vport = &hdev->vport[i];
                list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
                        list_del(&vlan->node);
                        kfree(vlan);
                }
        }

        mutex_unlock(&hdev->vport_lock);
}
void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev)
{
        struct hclge_vlan_info *vlan_info;
        struct hclge_vport *vport;
        u16 vlan_proto;
        u16 vlan_id;
        u16 state;
        int vf_id;
        int ret;

        /* PF should restore all vfs port base vlan */
        for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) {
                vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM];
                vlan_info = vport->port_base_vlan_cfg.tbl_sta ?
                            &vport->port_base_vlan_cfg.vlan_info :
                            &vport->port_base_vlan_cfg.old_vlan_info;

                vlan_id = vlan_info->vlan_tag;
                vlan_proto = vlan_info->vlan_proto;
                state = vport->port_base_vlan_cfg.state;

                if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
                        clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
                        ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
                                                       vport->vport_id,
                                                       vlan_id, false);
                        vport->port_base_vlan_cfg.tbl_sta = ret == 0;
                }
        }
}
void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
{
        struct hclge_vport_vlan_cfg *vlan, *tmp;
        struct hclge_dev *hdev = vport->back;
        int ret;

        mutex_lock(&hdev->vport_lock);

        if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
                list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
                        ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
                                                       vport->vport_id,
                                                       vlan->vlan_id, false);
                        if (ret)
                                break;
                        vlan->hd_tbl_status = true;
                }
        }

        mutex_unlock(&hdev->vport_lock);
}
/* For global reset and imp reset, hardware will clear the mac table,
 * so we change the mac address state from ACTIVE to TO_ADD, then they
 * can be restored in the service task after reset complete. Further,
 * the mac addresses with state TO_DEL or DEL_FAIL are unnecessary to
 * be restored after reset, so just remove these mac nodes from mac_list.
 */
static void hclge_mac_node_convert_for_reset(struct list_head *list)
{
        struct hclge_mac_node *mac_node, *tmp;

        list_for_each_entry_safe(mac_node, tmp, list, node) {
                if (mac_node->state == HCLGE_MAC_ACTIVE) {
                        mac_node->state = HCLGE_MAC_TO_ADD;
                } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
                        list_del(&mac_node->node);
                        kfree(mac_node);
                }
        }
}
void hclge_restore_mac_table_common(struct hclge_vport *vport)
{
        spin_lock_bh(&vport->mac_list_lock);

        hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
        hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
        set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

        spin_unlock_bh(&vport->mac_list_lock);
}
static void hclge_restore_hw_table(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = &hdev->vport[0];
        struct hnae3_handle *handle = &vport->nic;

        hclge_restore_mac_table_common(vport);
        hclge_restore_vport_port_base_vlan_config(hdev);
        hclge_restore_vport_vlan_table(vport);
        set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
        hclge_restore_fd_entries(handle);
}
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
        struct hclge_vport *vport = hclge_get_vport(handle);

        if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
                vport->rxvlan_cfg.strip_tag1_en = false;
                vport->rxvlan_cfg.strip_tag2_en = enable;
                vport->rxvlan_cfg.strip_tag2_discard_en = false;
        } else {
                vport->rxvlan_cfg.strip_tag1_en = enable;
                vport->rxvlan_cfg.strip_tag2_en = true;
                vport->rxvlan_cfg.strip_tag2_discard_en = true;
        }

        vport->rxvlan_cfg.strip_tag1_discard_en = false;
        vport->rxvlan_cfg.vlan1_vlan_prionly = false;
        vport->rxvlan_cfg.vlan2_vlan_prionly = false;
        vport->rxvlan_cfg.rx_vlan_offload_en = enable;

        return hclge_set_vlan_rx_offload_cfg(vport);
}
static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
{
        struct hclge_dev *hdev = vport->back;

        if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
                set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
}
static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
                                            u16 port_base_vlan_state,
                                            struct hclge_vlan_info *new_info,
                                            struct hclge_vlan_info *old_info)
{
        struct hclge_dev *hdev = vport->back;
        int ret;

        if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
                hclge_rm_vport_all_vlan_table(vport, false);
                /* force clear VLAN 0 */
                ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
                if (ret)
                        return ret;
                return hclge_set_vlan_filter_hw(hdev,
                                                htons(new_info->vlan_proto),
                                                vport->vport_id,
                                                new_info->vlan_tag,
                                                false);
        }

        vport->port_base_vlan_cfg.tbl_sta = false;

        /* force add VLAN 0 */
        ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
        if (ret)
                return ret;

        ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
                                       vport->vport_id, old_info->vlan_tag,
                                       true);
        if (ret)
                return ret;

        return hclge_add_vport_all_vlan_table(vport);
}
static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
                                          const struct hclge_vlan_info *old_cfg)
{
        if (new_cfg->vlan_tag != old_cfg->vlan_tag)
                return true;

        if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
                return true;

        return false;
}
static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
                                           struct hclge_vlan_info *new_info,
                                           struct hclge_vlan_info *old_info)
{
        struct hclge_dev *hdev = vport->back;
        int ret;

        /* add new VLAN tag */
        ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto),
                                       vport->vport_id, new_info->vlan_tag,
                                       false);
        if (ret)
                return ret;

        vport->port_base_vlan_cfg.tbl_sta = false;
        /* remove old VLAN tag */
        if (old_info->vlan_tag == 0)
                ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
                                               true, 0);
        else
                ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
                                               vport->vport_id,
                                               old_info->vlan_tag, true);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "failed to clear vport%u port base vlan %u, ret = %d.\n",
                        vport->vport_id, old_info->vlan_tag, ret);

        return ret;
}
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
                                    struct hclge_vlan_info *vlan_info)
{
        struct hnae3_handle *nic = &vport->nic;
        struct hclge_vlan_info *old_vlan_info;
        int ret;

        old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;

        ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
                                     vlan_info->qos);
        if (ret)
                return ret;

        if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
                goto out;

        if (state == HNAE3_PORT_BASE_VLAN_MODIFY)
                ret = hclge_modify_port_base_vlan_tag(vport, vlan_info,
                                                      old_vlan_info);
        else
                ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
                                                       old_vlan_info);
        if (ret)
                return ret;

out:
        vport->port_base_vlan_cfg.state = state;
        if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
                nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
        else
                nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

        vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info;
        vport->port_base_vlan_cfg.vlan_info = *vlan_info;
        vport->port_base_vlan_cfg.tbl_sta = true;
        hclge_set_vport_vlan_fltr_change(vport);

        return 0;
}
static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
                                          enum hnae3_port_base_vlan_state state,
                                          u16 vlan, u8 qos)
{
        if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
                if (!vlan && !qos)
                        return HNAE3_PORT_BASE_VLAN_NOCHANGE;

                return HNAE3_PORT_BASE_VLAN_ENABLE;
        }

        if (!vlan && !qos)
                return HNAE3_PORT_BASE_VLAN_DISABLE;

        if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
            vport->port_base_vlan_cfg.vlan_info.qos == qos)
                return HNAE3_PORT_BASE_VLAN_NOCHANGE;

        return HNAE3_PORT_BASE_VLAN_MODIFY;
}
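/* Decision table implemented above (current state x requested vlan/qos):
 *   DISABLE + vlan 0, qos 0  -> NOCHANGE
 *   DISABLE + any other pair -> ENABLE
 *   ENABLE  + vlan 0, qos 0  -> DISABLE
 *   ENABLE  + same pair      -> NOCHANGE
 *   ENABLE  + different pair -> MODIFY
 */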
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
                                    u16 vlan, u8 qos, __be16 proto)
{
        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        struct hclge_vlan_info vlan_info;
        u16 state;
        int ret;

        if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
                return -EOPNOTSUPP;

        vport = hclge_get_vf_vport(hdev, vfid);
        if (!vport)
                return -EINVAL;

        /* qos is a 3 bits value, so can not be bigger than 7 */
        if (vlan > VLAN_N_VID - 1 || qos > 7)
                return -EINVAL;
        if (proto != htons(ETH_P_8021Q))
                return -EPROTONOSUPPORT;

        state = hclge_get_port_base_vlan_state(vport,
                                               vport->port_base_vlan_cfg.state,
                                               vlan, qos);
        if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
                return 0;

        vlan_info.vlan_tag = vlan;
        vlan_info.qos = qos;
        vlan_info.vlan_proto = ntohs(proto);

        ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "failed to update port base vlan for vf %d, ret = %d\n",
                        vfid, ret);
                return ret;
        }

        /* there is a timewindow for PF to know VF unalive, it may
         * cause send mailbox fail, but it doesn't matter, VF will
         * query it when reinit.
         * for DEVICE_VERSION_V3, vf doesn't need to know about the port based
         * VLAN state, so it doesn't need to be notified by mailbox.
         */
        if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
                if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
                        (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
                                                                vport->vport_id,
                                                                state,
                                                                &vlan_info);
                else
                        set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN,
                                &vport->need_notify);
        }

        return 0;
}
10410 static void hclge_clear_vf_vlan(struct hclge_dev
*hdev
)
10412 struct hclge_vlan_info
*vlan_info
;
10413 struct hclge_vport
*vport
;
10417 /* clear port base vlan for all vf */
10418 for (vf
= HCLGE_VF_VPORT_START_NUM
; vf
< hdev
->num_alloc_vport
; vf
++) {
10419 vport
= &hdev
->vport
[vf
];
10420 vlan_info
= &vport
->port_base_vlan_cfg
.vlan_info
;
10422 ret
= hclge_set_vlan_filter_hw(hdev
, htons(ETH_P_8021Q
),
10424 vlan_info
->vlan_tag
, true);
10426 dev_err(&hdev
->pdev
->dev
,
10427 "failed to clear vf vlan for vf%d, ret = %d\n",
10428 vf
- HCLGE_VF_VPORT_START_NUM
, ret
);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool writen_to_tbl = false;
	int ret = 0;

	/* When device is resetting or reset failed, firmware is unable to
	 * handle mailbox. Just record the vlan id, and remove it after
	 * reset finished.
	 */
	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
		return -EBUSY;
	}

	/* when port base vlan enabled, we use port base vlan as the vlan
	 * filter entry. In this case, we don't update vlan filter table
	 * when user add new vlan or remove exist vlan, just update the vport
	 * vlan list. The vlan id in vlan list will be written into the vlan
	 * filter table once port base vlan is disabled.
	 */
	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
					       vlan_id, is_kill);
		writen_to_tbl = true;
	}

	if (!ret) {
		if (!is_kill)
			hclge_add_vport_vlan_table(vport, vlan_id,
						   writen_to_tbl);
		else if (is_kill && vlan_id != 0)
			hclge_rm_vport_vlan_table(vport, vlan_id, false);
	} else if (is_kill) {
		/* when remove hw vlan filter failed, record the vlan id,
		 * and try to remove it from hw later, to be consistent
		 * with the stack.
		 */
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
	}

	hclge_set_vport_vlan_fltr_change(vport);

	return ret;
}

static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int ret;
	u16 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
					&vport->state))
			continue;

		ret = hclge_enable_vport_vlan_filter(vport,
						     vport->req_vlan_fltr_en);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to sync vlan filter state for vport%u, ret = %d\n",
				vport->vport_id, ret);
			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
				&vport->state);
			return;
		}
	}
}

static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
{
#define HCLGE_MAX_SYNC_COUNT	60

	int i, ret, sync_cnt = 0;
	u16 vlan_id;

	/* start from vport 1 for PF is always alive */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
					 VLAN_N_VID);
		while (vlan_id != VLAN_N_VID) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id, vlan_id,
						       true);
			if (ret && ret != -EINVAL)
				return;

			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
			hclge_rm_vport_vlan_table(vport, vlan_id, false);
			hclge_set_vport_vlan_fltr_change(vport);

			sync_cnt++;
			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
				return;

			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
						 VLAN_N_VID);
		}
	}

	hclge_sync_vlan_fltr_state(hdev);
}
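
/* The stack MTU is converted to a max frame size (MTU + Ethernet header
 * + FCS + two VLAN tags) and written to firmware with the
 * CONFIG_MAX_FRM_SIZE command.
 */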
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}

int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret;

	/* HW supports 2 layers of vlan */
	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be greater than VF's mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			dev_err(&hdev->pdev->dev,
				"failed to set pf mtu for less than vport %d, mps = %u.\n",
				i, hdev->vport[i].mps);
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}
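
/* TQP reset is a handshake with firmware: assert the reset request,
 * poll RESET_TQP_QUEUE until the queue reports ready, then deassert the
 * soft reset again. The helpers below implement the three steps.
 */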
static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id);
	if (enable)
		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
				  u8 *reset_status)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	*reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);

	return 0;
}

u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_comm_tqp *tqp;
	struct hnae3_queue *queue;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_comm_tqp, q);

	return tqp->index;
}

static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 reset_try_times = 0;
	u8 reset_status;
	u16 queue_gid;
	int ret;
	u16 i;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		queue_gid = hclge_covert_handle_qid_global(handle, i);
		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to send reset tqp cmd, ret = %d\n",
				ret);
			return ret;
		}

		while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
			ret = hclge_get_reset_status(hdev, queue_gid,
						     &reset_status);
			if (ret)
				return ret;

			if (reset_status)
				break;

			/* Wait for tqp hw reset */
			usleep_range(1000, 1200);
		}

		if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
			dev_err(&hdev->pdev->dev,
				"wait for tqp hw reset timeout\n");
			return -ETIME;
		}

		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to deassert soft reset, ret = %d\n",
				ret);
			return ret;
		}
		reset_try_times = 0;
	}

	return 0;
}
static int hclge_reset_rcb(struct hnae3_handle *handle)
{
#define HCLGE_RESET_RCB_NOT_SUPPORT	0U
#define HCLGE_RESET_RCB_SUCCESS		1U

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_reset_cmd *req;
	struct hclge_desc desc;
	u8 return_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(handle, 0);

	req = (struct hclge_reset_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
	req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
	req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to send rcb reset cmd, ret = %d\n", ret);
		return ret;
	}

	return_status = req->fun_reset_rcb_return_status;
	if (return_status == HCLGE_RESET_RCB_SUCCESS)
		return 0;

	if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
		dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
			return_status);
		return -EIO;
	}

	/* if reset rcb cmd is unsupported, we need to send reset tqp cmd
	 * again to reset all tqps
	 */
	return hclge_reset_tqp_cmd(handle);
}

int hclge_reset_tqp(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* only need to disable PF's tqp */
	if (!vport->vport_id) {
		ret = hclge_tqp_enable(handle, false);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to disable tqp, ret = %d\n", ret);
			return ret;
		}
	}

	return hclge_reset_rcb(handle);
}

static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}
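
/* Pause configuration: advertisement is pushed to the PHY when one is
 * present, and the resolved rx/tx pause state is programmed into the
 * MAC, except in PFC mode where link-level pause must stay untouched.
 */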
static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phy_set_asym_pause(phydev, rx_en, tx_en);
}

static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"configure pauseparam error, ret = %d.\n", ret);

	return ret;
}

int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link)
		return 0;

	if (!phydev->autoneg)
		return hclge_mac_pause_setup_hw(hdev);

	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 media_type = hdev->hw.mac.media_type;

	*auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
		    hclge_get_autoneg(handle) : 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}

static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
					 u32 rx_en, u32 tx_en)
{
	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
}

static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
		fc_autoneg = hclge_get_autoneg(handle);
		if (auto_neg != fc_autoneg) {
			dev_info(&hdev->pdev->dev,
				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
			return -EOPNOTSUPP;
		}
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	hclge_record_user_pauseparam(hdev, rx_en, tx_en);

	if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	if (phydev)
		return phy_start_aneg(phydev);

	return -EOPNOTSUPP;
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex,
					  u32 *lane_num)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
	if (lane_num)
		*lane_num = hdev->hw.mac.lane_num;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				 u8 *module_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* When nic is down, the service task is not running, doesn't update
	 * the port information per second. Query the port information before
	 * returning the media type, to ensure getting the correct media
	 * information.
	 */
	hclge_update_port_info(hdev);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, is_resolved;
	unsigned int retval;

	*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
	*tp_mdix = ETH_TP_MDI_INVALID;

	if (!phydev)
		return;

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
static void hclge_info_show(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport->nic;
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "PF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
	dev_info(dev, "This is %s PF\n",
		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
	dev_info(dev, "DCB %s\n",
		 handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable");
	dev_info(dev, "MQPRIO %s\n",
		 handle->kinfo.tc_info.mqprio_active ? "enable" : "disable");
	dev_info(dev, "Default tx spare buffer size: %u\n",
		 hdev->tx_spare_buf_size);

	dev_info(dev, "PF info end.\n");
}
static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					  struct hclge_vport *vport)
{
	struct hnae3_client *client = vport->nic.client;
	struct hclge_dev *hdev = ae_dev->priv;
	int rst_cnt = hdev->rst_stats.reset_cnt;
	int ret;

	ret = client->ops->init_instance(&vport->nic);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_nic_err;
	}

	/* Enable nic hw error interrupts */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto init_nic_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->vport->nic))
		hclge_info_show(hdev);

	return ret;

init_nic_err:
	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	client->ops->uninit_instance(&vport->nic, 0);

	return ret;
}

static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					   struct hclge_vport *vport)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hnae3_client *client;
	int rst_cnt;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	client = hdev->roce_client;
	ret = hclge_init_roce_base_info(vport);
	if (ret)
		return ret;

	rst_cnt = hdev->rst_stats.reset_cnt;
	ret = client->ops->init_instance(&vport->roce);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_roce_err;
	}

	/* Enable roce ras interrupts */
	ret = hclge_config_rocee_ras_interrupt(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable roce ras interrupts\n", ret);
		goto init_roce_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;

init_roce_err:
	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);

	return ret;
}
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport = &hdev->vport[0];
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		vport->nic.client = client;
		ret = hclge_init_nic_client_instance(ae_dev, vport);
		if (ret)
			goto clear_nic;

		ret = hclge_init_roce_client_instance(ae_dev, vport);
		if (ret)
			goto clear_roce;

		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			vport->roce.client = client;
		}

		ret = hclge_init_roce_client_instance(ae_dev, vport);
		if (ret)
			goto clear_roce;

		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}

static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport = &hdev->vport[0];

	if (hdev->roce_client) {
		clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
			msleep(HCLGE_WAIT_RESET_DONE);

		hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
		hdev->roce_client = NULL;
		vport->roce.client = NULL;
	}
	if (client->type == HNAE3_CLIENT_ROCE)
		return;
	if (hdev->nic_client && client->ops->uninit_instance) {
		clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
			msleep(HCLGE_WAIT_RESET_DONE);

		client->ops->uninit_instance(&vport->nic, 0);
		hdev->nic_client = NULL;
		vport->nic.client = NULL;
	}
}
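
/* Device memory behind HCLGE_MEM_BAR is optional: it is mapped
 * write-combined when the BAR is advertised, otherwise only the I/O BAR
 * is used.
 */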
static int hclge_dev_mem_map(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw = &hdev->hw;

	/* for devices that do not have device memory, return directly */
	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
		return 0;

	hw->hw.mem_base =
		devm_ioremap_wc(&pdev->dev,
				pci_resource_start(pdev, HCLGE_MEM_BAR),
				pci_resource_len(pdev, HCLGE_MEM_BAR));
	if (!hw->hw.mem_base) {
		dev_err(&pdev->dev, "failed to map device memory\n");
		return -EFAULT;
	}

	return 0;
}

static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hw.io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->hw.io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_release_regions;
	}

	ret = hclge_dev_mem_map(hdev);
	if (ret)
		goto err_unmap_io_base;

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_unmap_io_base:
	pcim_iounmap(pdev, hdev->hw.hw.io_base);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	if (hdev->hw.hw.mem_base)
		devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);

	pcim_iounmap(pdev, hdev->hw.hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	set_bit(HCLGE_STATE_REMOVING, &hdev->state);

	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);
}

static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
#define HCLGE_RESET_RETRY_WAIT_MS	500
#define HCLGE_RESET_RETRY_CNT	5

	struct hclge_dev *hdev = ae_dev->priv;
	int retry_cnt = 0;
	int ret;

	while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
		down(&hdev->reset_sem);
		set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
		hdev->reset_type = rst_type;
		ret = hclge_reset_prepare(hdev);
		if (!ret && !hdev->reset_pending)
			break;

		dev_err(&hdev->pdev->dev,
			"failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
			ret, hdev->reset_pending, retry_cnt);
		clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
		up(&hdev->reset_sem);
		msleep(HCLGE_RESET_RETRY_WAIT_MS);
	}

	/* disable misc vector before reset done */
	hclge_enable_vector(&hdev->misc_vector, false);
	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);

	if (hdev->reset_type == HNAE3_FLR_RESET)
		hdev->rst_stats.flr_rst_cnt++;
}
static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	int ret;

	hclge_enable_vector(&hdev->misc_vector, true);

	ret = hclge_reset_rebuild(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

static void hclge_clear_resetting_state(struct hclge_dev *hdev)
{
	u16 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to clear vport's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "clear vport(%u) rst failed %d!\n",
				 vport->vport_id, ret);
	}
}

static int hclge_clear_hw_resource(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	/* This new command is only supported by new firmware, it will
	 * fail with older firmware. Error value -EOPNOTSUPP can only be
	 * returned by older firmware running this command, to keep code
	 * backward compatible we will override this value and return
	 * success.
	 */
	if (ret && ret != -EOPNOTSUPP) {
		dev_err(&hdev->pdev->dev,
			"failed to clear hw resource, ret = %d\n", ret);
		return ret;
	}

	return 0;
}
static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
{
	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
}

static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
{
	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
}

static struct hclge_wol_info *hclge_get_wol_info(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return &vport->back->hw.mac.wol;
}

static int hclge_get_wol_supported_mode(struct hclge_dev *hdev,
					u32 *wol_supported)
{
	struct hclge_query_wol_supported_cmd *wol_supported_cmd;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_GET_SUPPORTED_MODE,
				   true);
	wol_supported_cmd = (struct hclge_query_wol_supported_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query wol supported, ret = %d\n", ret);
		return ret;
	}

	*wol_supported = le32_to_cpu(wol_supported_cmd->supported_wake_mode);

	return 0;
}

static int hclge_set_wol_cfg(struct hclge_dev *hdev,
			     struct hclge_wol_info *wol_info)
{
	struct hclge_wol_cfg_cmd *wol_cfg_cmd;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_CFG, false);
	wol_cfg_cmd = (struct hclge_wol_cfg_cmd *)desc.data;
	wol_cfg_cmd->wake_on_lan_mode = cpu_to_le32(wol_info->wol_current_mode);
	wol_cfg_cmd->sopass_size = wol_info->wol_sopass_size;
	memcpy(wol_cfg_cmd->sopass, wol_info->wol_sopass, SOPASS_MAX);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set wol config, ret = %d\n", ret);

	return ret;
}

static int hclge_update_wol(struct hclge_dev *hdev)
{
	struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;

	if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
		return 0;

	return hclge_set_wol_cfg(hdev, wol_info);
}

static int hclge_init_wol(struct hclge_dev *hdev)
{
	struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;
	int ret;

	if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
		return 0;

	memset(wol_info, 0, sizeof(struct hclge_wol_info));
	ret = hclge_get_wol_supported_mode(hdev,
					   &wol_info->wol_support_mode);
	if (ret) {
		wol_info->wol_support_mode = 0;
		return ret;
	}

	return hclge_update_wol(hdev);
}

static void hclge_get_wol(struct hnae3_handle *handle,
			  struct ethtool_wolinfo *wol)
{
	struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);

	wol->supported = wol_info->wol_support_mode;
	wol->wolopts = wol_info->wol_current_mode;
	if (wol_info->wol_current_mode & WAKE_MAGICSECURE)
		memcpy(wol->sopass, wol_info->wol_sopass, SOPASS_MAX);
}

static int hclge_set_wol(struct hnae3_handle *handle,
			 struct ethtool_wolinfo *wol)
{
	struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);
	struct hclge_vport *vport = hclge_get_vport(handle);
	u32 wol_mode;
	int ret;

	wol_mode = wol->wolopts;
	if (wol_mode & ~wol_info->wol_support_mode)
		return -EINVAL;

	wol_info->wol_current_mode = wol_mode;
	if (wol_mode & WAKE_MAGICSECURE) {
		memcpy(wol_info->wol_sopass, wol->sopass, SOPASS_MAX);
		wol_info->wol_sopass_size = SOPASS_MAX;
	} else {
		wol_info->wol_sopass_size = 0;
	}

	ret = hclge_set_wol_cfg(vport->back, wol_info);
	if (ret)
		wol_info->wol_current_mode = 0;

	return ret;
}
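
/* Main PF probe path: PCI/devlink setup, firmware command queue and
 * command init, capability and spec query, then MSI, TQP, vport, MAC,
 * VLAN, TM, RSS, flow director, PTP and WoL initialization, with a
 * matching unwind label for every failure point.
 */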
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;

	/* HW supports 2 layers of vlan */
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	spin_lock_init(&hdev->fd_rule_lock);
	sema_init(&hdev->reset_sem, 1);

	ret = hclge_pci_init(hdev);
	if (ret)
		goto out;

	ret = hclge_devlink_init(hdev);
	if (ret)
		goto err_pci_uninit;

	/* Firmware command queue initialize */
	ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
	if (ret)
		goto err_devlink_uninit;

	/* Firmware command initialize */
	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
				  true, hdev->reset_pending);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_clear_hw_resource(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_query_dev_specs(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret)
		goto err_msi_uninit;

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret)
		goto err_msi_irq_uninit;

	ret = hclge_map_tqp(hdev);
	if (ret)
		goto err_msi_irq_uninit;

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		if (hnae3_dev_phy_imp_supported(hdev))
			ret = hclge_update_tp_port_info(hdev);
		else
			ret = hclge_mac_mdio_config(hdev);

		if (ret)
			goto err_msi_irq_uninit;
	}

	ret = hclge_init_umv_space(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev,
				      &hdev->rss_cfg);
	if (ret) {
		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_ptp_init(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_update_port_info(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	INIT_KFIFO(hdev->mac_tnl_log);

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);

	hclge_clear_all_event_cause(hdev);
	hclge_clear_resetting_state(hdev);

	/* Log and clear the hw errors that have already occurred */
	if (hnae3_dev_ras_imp_supported(hdev))
		hclge_handle_occurred_error(hdev);
	else
		hclge_handle_all_hns_hw_errors(ae_dev);

	/* request delayed reset for the error recovery because an immediate
	 * global reset on a PF would affect the pending initialization of
	 * other PFs
	 */
	if (ae_dev->hw_err_reset_req) {
		enum hnae3_reset_type reset_level;

		reset_level = hclge_get_reset_level(ae_dev,
						    &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_level);
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
	}

	hclge_init_rxd_adv_layout(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	ret = hclge_init_wol(hdev);
	if (ret)
		dev_warn(&pdev->dev,
			 "failed to wake on lan init, ret = %d\n", ret);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
err_devlink_uninit:
	hclge_devlink_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.hw.io_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	mutex_destroy(&hdev->vport_lock);
	return ret;
}
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
	memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats));
}

static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
	return hclge_config_switch_param(hdev, vf, enable,
					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
}

static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					  HCLGE_FILTER_FE_NIC_INGRESS_B,
					  enable, vf);
}

static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
{
	int ret;

	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set vf %d mac spoof check %s failed, ret=%d\n",
			vf, enable ? "on" : "off", ret);
		return ret;
	}

	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set vf %d vlan spoof check %s failed, ret=%d\n",
			vf, enable ? "on" : "off", ret);

	return ret;
}
static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
				 bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 new_spoofchk = enable ? 1 : 0;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (vport->vf_info.spoofchk == new_spoofchk)
		return 0;

	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
		dev_warn(&hdev->pdev->dev,
			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
			 vf);
	else if (enable && hclge_is_umv_space_full(vport, true))
		dev_warn(&hdev->pdev->dev,
			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
			 vf);

	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
	if (ret)
		return ret;

	vport->vf_info.spoofchk = new_spoofchk;
	return 0;
}

static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	int i;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return 0;

	/* resume the vf spoof check state after reset */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
					       vport->vf_info.spoofchk);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 new_trusted = enable ? 1 : 0;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (vport->vf_info.trusted == new_trusted)
		return 0;

	vport->vf_info.trusted = new_trusted;
	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
	hclge_task_schedule(hdev, 0);

	return 0;
}
static void hclge_reset_vf_rate(struct hclge_dev *hdev)
{
	int ret;
	int vf;

	/* reset vf rate to default value */
	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
		struct hclge_vport *vport = &hdev->vport[vf];

		vport->vf_info.max_tx_rate = 0;
		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"vf%d failed to reset to default, ret=%d\n",
				vf - HCLGE_VF_VPORT_START_NUM, ret);
	}
}

static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
				     int min_tx_rate, int max_tx_rate)
{
	if (min_tx_rate != 0 ||
	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
		dev_err(&hdev->pdev->dev,
			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
		return -EINVAL;
	}

	return 0;
}

static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
			     int min_tx_rate, int max_tx_rate, bool force)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
	if (ret)
		return ret;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
		return 0;

	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
	if (ret)
		return ret;

	vport->vf_info.max_tx_rate = max_tx_rate;

	return 0;
}

static int hclge_resume_vf_rate(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport->nic;
	struct hclge_vport *vport;
	int ret;
	int vf;

	/* resume the vf max_tx_rate after reset */
	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
		vport = hclge_get_vf_vport(hdev, vf);
		if (!vport)
			return -EINVAL;

		/* zero means max rate, after reset, firmware already set it to
		 * max rate, so just continue.
		 */
		if (!vport->vf_info.max_tx_rate)
			continue;

		ret = hclge_set_vf_rate(handle, vf, 0,
					vport->vf_info.max_tx_rate, true);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vf%d failed to resume tx_rate:%u, ret=%d\n",
				vf, vport->vf_info.max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}
static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
		vport++;
	}
}
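
/* Rebuild after reset: reinitialize the command queue and the hardware
 * tables invalidated by an IMP/global reset, then restore the per-VF
 * configuration (spoof check, tx rate) recorded before the reset.
 */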
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	/* NOTE: pf reset needn't clear or restore pf and vf table entries,
	 * so the tables in memory should not be cleaned here.
	 */
	if (hdev->reset_type == HNAE3_IMP_RESET ||
	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
		hclge_reset_umv_space(hdev);
	}

	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
				  true, hdev->reset_pending);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_tp_port_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
			ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to reinit manager table, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
		return ret;
	}

	ret = hclge_ptp_init(hdev);
	if (ret)
		return ret;

	/* Log and clear the hw errors that have already occurred */
	if (hnae3_dev_ras_imp_supported(hdev))
		hclge_handle_occurred_error(hdev);
	else
		hclge_handle_all_hns_hw_errors(ae_dev);

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on global reset.
	 */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable NIC hw error interrupts\n",
			ret);
		return ret;
	}

	if (hdev->roce_client) {
		ret = hclge_config_rocee_ras_interrupt(hdev, true);
		if (ret) {
			dev_err(&pdev->dev,
				"fail(%d) to re-enable roce ras interrupts\n",
				ret);
			return ret;
		}
	}

	hclge_reset_vport_state(hdev);
	ret = hclge_reset_vport_spoofchk(hdev);
	if (ret)
		return ret;

	ret = hclge_resume_vf_rate(hdev);
	if (ret)
		return ret;

	hclge_init_rxd_adv_layout(hdev);

	ret = hclge_update_wol(hdev);
	if (ret)
		dev_warn(&pdev->dev,
			 "failed to update wol config, ret = %d\n", ret);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_reset_vf_rate(hdev);
	hclge_clear_vf_vlan(hdev);
	hclge_state_uninit(hdev);
	hclge_ptp_uninit(hdev);
	hclge_uninit_rxd_adv_layout(hdev);
	hclge_uninit_mac_table(hdev);
	hclge_del_all_fd_entries(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	/* Disable all hw interrupts */
	hclge_config_mac_tnl_int(hdev, false);
	hclge_config_nic_hw_error(hdev, false);
	hclge_config_rocee_ras_interrupt(hdev, false);

	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
	hclge_misc_irq_uninit(hdev);
	hclge_devlink_uninit(hdev);
	hclge_pci_uninit(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	mutex_destroy(&hdev->vport_lock);
	ae_dev->priv = NULL;
}
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->pf_rss_size_max;
}

static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	struct hclge_dev *hdev = vport->back;
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	unsigned int i;

	roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = vport->nic.kinfo.rss_size * i;
	}

	return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
					  tc_size);
}
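
/* Changing the channel count updates the vport TQP mapping and the RSS
 * TC mode first; the RSS indirection table is rebuilt only when the
 * user has not configured one explicitly (rxfh_configured).
 */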
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_set_rss_tc_mode_cfg(handle);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
			    GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);

	return ret;
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool gro_en_old = hdev->gro_en;
	int ret;

	hdev->gro_en = enable;
	ret = hclge_config_gro(hdev);
	if (ret)
		hdev->gro_en = gro_en_old;

	return ret;
}
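
/* Promiscuous state is synchronized from the periodic service task: the
 * PF applies its netdev flags directly, while a VF's unicast/multicast
 * requests are honoured only when it is trusted; table-overflow promisc
 * flags are OR-ed in for either case.
 */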
static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport)
{
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	bool uc_en = false;
	bool mc_en = false;
	u8 tmp_flags;
	bool bc_en;
	int ret;

	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
		vport->last_promisc_flags = vport->overflow_promisc_flags;
	}

	if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
				&vport->state))
		return 0;

	/* for PF */
	if (!vport->vport_id) {
		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
					     tmp_flags & HNAE3_MPE);
		if (!ret)
			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
				&vport->state);
		else
			set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
				&vport->state);
		return ret;
	}

	/* for VF */
	if (vport->vf_info.trusted) {
		uc_en = vport->vf_info.request_uc_en > 0 ||
			vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE;
		mc_en = vport->vf_info.request_mc_en > 0 ||
			vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE;
	}
	bc_en = vport->vf_info.request_bc_en > 0;

	ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
					 mc_en, bc_en);
	if (ret) {
		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
		return ret;
	}
	hclge_set_vport_vlan_fltr_change(vport);

	return 0;
}

static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int ret;
	u16 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];

		ret = hclge_sync_vport_promisc_mode(vport);
		if (ret)
			return;
	}
}
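
/* SFP module EEPROM dump: one firmware command transfers at most six
 * descriptors worth of data, so hclge_get_module_eeprom() loops until
 * the requested length has been read.
 */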
static bool hclge_module_existed(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u32 existed;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP exist state, ret = %d\n", ret);
		return false;
	}

	existed = le32_to_cpu(desc.data[0]);

	return existed != 0;
}

/* need 6 bds(total 140 bytes) in one reading
 * return the number of bytes actually read, 0 means read failed.
 */
static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
				     u32 len, u8 *data)
{
	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
	u16 read_len;
	u16 copy_len;
	int ret;
	int i;

	/* setup all 6 bds to read module eeprom info. */
	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
					   true);

		/* bd0~bd4 need next flag */
		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	}

	/* setup bd0, this bd contains offset and read length. */
	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
	sfp_info_bd0->read_len = cpu_to_le16(read_len);

	ret = hclge_cmd_send(&hdev->hw, desc, i);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP eeprom info, ret = %d\n", ret);
		return 0;
	}

	/* copy sfp info from bd0 to out buffer. */
	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
	memcpy(data, sfp_info_bd0->data, copy_len);
	read_len = copy_len;

	/* copy sfp info from bd1~bd5 to out buffer if needed. */
	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		if (read_len >= len)
			break;

		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
		memcpy(data + read_len, desc[i].data, copy_len);
		read_len += copy_len;
	}

	return read_len;
}
static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
				   u32 len, u8 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 read_len = 0;
	u16 data_len;

	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return -EOPNOTSUPP;

	if (!hclge_module_existed(hdev))
		return -ENXIO;

	while (read_len < len) {
		data_len = hclge_get_sfp_eeprom_info(hdev,
						     offset + read_len,
						     len - read_len,
						     data + read_len);
		if (!data_len)
			return -EIO;

		read_len += data_len;
	}

	return 0;
}

static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
					 u32 *status_code)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query link diagnosis info, ret = %d\n", ret);
		return ret;
	}

	*status_code = le32_to_cpu(desc.data[0]);
	return 0;
}
/* After disabling sriov, the VF still has some config and info that need
 * cleaning, which was configured by the PF.
 */
static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	int ret;

	clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->need_notify = 0;
	vport->mps = 0;

	/* after disabling sriov, clean the VF rate configured by PF */
	ret = hclge_tm_qs_shaper_cfg(vport, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clean vf%d rate config, ret = %d\n",
			vfid, ret);

	vlan_info.vlan_tag = 0;
	vlan_info.qos = 0;
	vlan_info.vlan_proto = ETH_P_8021Q;
	ret = hclge_update_port_base_vlan_cfg(vport,
					      HNAE3_PORT_BASE_VLAN_DISABLE,
					      &vlan_info);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clean vf%d port base vlan, ret = %d\n",
			vfid, ret);

	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clean vf%d spoof config, ret = %d\n",
			vfid, ret);

	memset(&vport->vf_info, 0, sizeof(vport->vf_info));
}

static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < num_vfs; i++) {
		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];

		hclge_clear_vport_vf_info(vport, i);
	}
}

static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode,
			       u8 *priority)
{
	struct hclge_vport *vport = hclge_get_vport(h);

	if (dscp >= HNAE3_MAX_DSCP)
		return -EINVAL;

	if (tc_mode)
		*tc_mode = vport->nic.kinfo.tc_map_mode;
	if (priority)
		*priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 :
			    vport->nic.kinfo.dscp_prio[dscp];

	return 0;
}
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.reset_prepare = hclge_reset_prepare_general,
	.reset_done = hclge_reset_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.request_update_promisc_mode = hclge_request_update_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec_stats = hclge_get_fec_stats,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_comm_get_rss_key_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_read_cmd = hclge_dbg_read_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.get_vf_config = hclge_get_vf_config,
	.set_vf_link_state = hclge_set_vf_link_state,
	.set_vf_spoofchk = hclge_set_vf_spoofchk,
	.set_vf_trust = hclge_set_vf_trust,
	.set_vf_rate = hclge_set_vf_rate,
	.set_vf_mac = hclge_set_vf_mac,
	.get_module_eeprom = hclge_get_module_eeprom,
	.get_cmdq_stat = hclge_get_cmdq_stat,
	.add_cls_flower = hclge_add_cls_flower,
	.del_cls_flower = hclge_del_cls_flower,
	.cls_flower_active = hclge_is_cls_flower_active,
	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
	.set_tx_hwts_info = hclge_ptp_set_tx_info,
	.get_rx_hwts = hclge_ptp_get_rx_hwts,
	.get_ts_info = hclge_ptp_get_ts_info,
	.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
	.clean_vf_config = hclge_clean_vport_config,
	.get_dscp_prio = hclge_get_dscp_prio,
	.get_wol = hclge_get_wol,
	.set_wol = hclge_set_wol,
};
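
/* Registration glue: pairs the ops table above with the PCI device IDs
 * this PF driver claims (see ae_algo_pci_tbl).
 */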
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
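
/* Module entry point: allocate the driver-private unbound workqueue used
 * by the service and reset tasks, then register the algorithm with the
 * hnae3 framework so matching devices get probed.
 */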
static int __init hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
	if (!hclge_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}
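
/* Module exit: unregistration is deliberately split into a prepare step
 * and the final unregister; the workqueue is destroyed only afterwards,
 * once no more work can be queued on it.
 */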
static void __exit hclge_exit(void)
{
	hnae3_unregister_ae_algo_prepare(&ae_algo);
	hnae3_unregister_ae_algo(&ae_algo);
	destroy_workqueue(hclge_wq);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);