// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/if_vlan.h>
#include <linux/irq.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <net/ip6_checksum.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/vxlan.h>
#include <net/geneve.h>

#include "hns3_enet.h"

/* All hns3 tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "hns3_trace.h"

#define hns3_set_field(origin, shift, val)	((origin) |= (val) << (shift))
#define hns3_tx_bd_count(S)	DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)

#define hns3_rl_err(fmt, ...)					\
	do {							\
		if (net_ratelimit())				\
			netdev_err(fmt, ##__VA_ARGS__);		\
	} while (0)
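/* For illustration: hns3_tx_bd_count() just rounds the length up to whole
 * buffer descriptors, e.g. a payload of (2 * HNS3_MAX_BD_SIZE + 1) bytes
 * needs DIV_ROUND_UP(2 * HNS3_MAX_BD_SIZE + 1, HNS3_MAX_BD_SIZE) == 3 BDs.
 */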
static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);

static const char hns3_driver_name[] = "hns3";
static const char hns3_driver_string[] =
			"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Network interface message level setting");

static unsigned int tx_sgl = 1;
module_param(tx_sgl, uint, 0600);
MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");

static bool page_pool_enabled = true;
module_param(page_pool_enabled, bool, 0400);

#define HNS3_SGL_SIZE(nfrag)	(sizeof(struct scatterlist) * (nfrag) +	\
				 sizeof(struct sg_table))
#define HNS3_MAX_SGL_SIZE	ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \
				      dma_get_cache_alignment())

#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			   NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)

#define HNS3_INNER_VLAN_TAG	1
#define HNS3_OUTER_VLAN_TAG	2

#define HNS3_MIN_TX_LEN		33U
#define HNS3_MIN_TUN_PKT_LEN	65U
/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
#define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t, h) \
	{	ptype, \
		l, \
		CHECKSUM_##s, \
		HNS3_L3_TYPE_##t, \
		1, \
		h\
	}

#define HNS3_RX_PTYPE_UNUSED_ENTRY(ptype) \
		{ ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0, \
		  PKT_HASH_TYPE_NONE }
static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
	HNS3_RX_PTYPE_UNUSED_ENTRY(0),
	HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_UNUSED_ENTRY(9),
	HNS3_RX_PTYPE_UNUSED_ENTRY(10),
	HNS3_RX_PTYPE_UNUSED_ENTRY(11),
	HNS3_RX_PTYPE_UNUSED_ENTRY(12),
	HNS3_RX_PTYPE_UNUSED_ENTRY(13),
	HNS3_RX_PTYPE_UNUSED_ENTRY(14),
	HNS3_RX_PTYPE_UNUSED_ENTRY(15),
	HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_UNUSED_ENTRY(26),
	HNS3_RX_PTYPE_UNUSED_ENTRY(27),
	HNS3_RX_PTYPE_UNUSED_ENTRY(28),
	HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_UNUSED_ENTRY(38),
	HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
168 HNS3_RX_PTYPE_UNUSED_ENTRY(46),
169 HNS3_RX_PTYPE_UNUSED_ENTRY(47),
170 HNS3_RX_PTYPE_UNUSED_ENTRY(48),
171 HNS3_RX_PTYPE_UNUSED_ENTRY(49),
172 HNS3_RX_PTYPE_UNUSED_ENTRY(50),
173 HNS3_RX_PTYPE_UNUSED_ENTRY(51),
174 HNS3_RX_PTYPE_UNUSED_ENTRY(52),
175 HNS3_RX_PTYPE_UNUSED_ENTRY(53),
176 HNS3_RX_PTYPE_UNUSED_ENTRY(54),
177 HNS3_RX_PTYPE_UNUSED_ENTRY(55),
178 HNS3_RX_PTYPE_UNUSED_ENTRY(56),
179 HNS3_RX_PTYPE_UNUSED_ENTRY(57),
180 HNS3_RX_PTYPE_UNUSED_ENTRY(58),
181 HNS3_RX_PTYPE_UNUSED_ENTRY(59),
182 HNS3_RX_PTYPE_UNUSED_ENTRY(60),
183 HNS3_RX_PTYPE_UNUSED_ENTRY(61),
184 HNS3_RX_PTYPE_UNUSED_ENTRY(62),
185 HNS3_RX_PTYPE_UNUSED_ENTRY(63),
186 HNS3_RX_PTYPE_UNUSED_ENTRY(64),
187 HNS3_RX_PTYPE_UNUSED_ENTRY(65),
188 HNS3_RX_PTYPE_UNUSED_ENTRY(66),
189 HNS3_RX_PTYPE_UNUSED_ENTRY(67),
190 HNS3_RX_PTYPE_UNUSED_ENTRY(68),
191 HNS3_RX_PTYPE_UNUSED_ENTRY(69),
192 HNS3_RX_PTYPE_UNUSED_ENTRY(70),
193 HNS3_RX_PTYPE_UNUSED_ENTRY(71),
194 HNS3_RX_PTYPE_UNUSED_ENTRY(72),
195 HNS3_RX_PTYPE_UNUSED_ENTRY(73),
196 HNS3_RX_PTYPE_UNUSED_ENTRY(74),
197 HNS3_RX_PTYPE_UNUSED_ENTRY(75),
198 HNS3_RX_PTYPE_UNUSED_ENTRY(76),
199 HNS3_RX_PTYPE_UNUSED_ENTRY(77),
200 HNS3_RX_PTYPE_UNUSED_ENTRY(78),
201 HNS3_RX_PTYPE_UNUSED_ENTRY(79),
202 HNS3_RX_PTYPE_UNUSED_ENTRY(80),
203 HNS3_RX_PTYPE_UNUSED_ENTRY(81),
204 HNS3_RX_PTYPE_UNUSED_ENTRY(82),
205 HNS3_RX_PTYPE_UNUSED_ENTRY(83),
206 HNS3_RX_PTYPE_UNUSED_ENTRY(84),
207 HNS3_RX_PTYPE_UNUSED_ENTRY(85),
208 HNS3_RX_PTYPE_UNUSED_ENTRY(86),
209 HNS3_RX_PTYPE_UNUSED_ENTRY(87),
210 HNS3_RX_PTYPE_UNUSED_ENTRY(88),
211 HNS3_RX_PTYPE_UNUSED_ENTRY(89),
212 HNS3_RX_PTYPE_UNUSED_ENTRY(90),
213 HNS3_RX_PTYPE_UNUSED_ENTRY(91),
214 HNS3_RX_PTYPE_UNUSED_ENTRY(92),
215 HNS3_RX_PTYPE_UNUSED_ENTRY(93),
216 HNS3_RX_PTYPE_UNUSED_ENTRY(94),
217 HNS3_RX_PTYPE_UNUSED_ENTRY(95),
218 HNS3_RX_PTYPE_UNUSED_ENTRY(96),
219 HNS3_RX_PTYPE_UNUSED_ENTRY(97),
220 HNS3_RX_PTYPE_UNUSED_ENTRY(98),
221 HNS3_RX_PTYPE_UNUSED_ENTRY(99),
222 HNS3_RX_PTYPE_UNUSED_ENTRY(100),
223 HNS3_RX_PTYPE_UNUSED_ENTRY(101),
224 HNS3_RX_PTYPE_UNUSED_ENTRY(102),
225 HNS3_RX_PTYPE_UNUSED_ENTRY(103),
226 HNS3_RX_PTYPE_UNUSED_ENTRY(104),
227 HNS3_RX_PTYPE_UNUSED_ENTRY(105),
228 HNS3_RX_PTYPE_UNUSED_ENTRY(106),
229 HNS3_RX_PTYPE_UNUSED_ENTRY(107),
230 HNS3_RX_PTYPE_UNUSED_ENTRY(108),
231 HNS3_RX_PTYPE_UNUSED_ENTRY(109),
232 HNS3_RX_PTYPE_UNUSED_ENTRY(110),
	HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_UNUSED_ENTRY(120),
	HNS3_RX_PTYPE_UNUSED_ENTRY(121),
	HNS3_RX_PTYPE_UNUSED_ENTRY(122),
	HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_UNUSED_ENTRY(132),
	HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
262 HNS3_RX_PTYPE_UNUSED_ENTRY(140),
263 HNS3_RX_PTYPE_UNUSED_ENTRY(141),
264 HNS3_RX_PTYPE_UNUSED_ENTRY(142),
265 HNS3_RX_PTYPE_UNUSED_ENTRY(143),
266 HNS3_RX_PTYPE_UNUSED_ENTRY(144),
267 HNS3_RX_PTYPE_UNUSED_ENTRY(145),
268 HNS3_RX_PTYPE_UNUSED_ENTRY(146),
269 HNS3_RX_PTYPE_UNUSED_ENTRY(147),
270 HNS3_RX_PTYPE_UNUSED_ENTRY(148),
271 HNS3_RX_PTYPE_UNUSED_ENTRY(149),
272 HNS3_RX_PTYPE_UNUSED_ENTRY(150),
273 HNS3_RX_PTYPE_UNUSED_ENTRY(151),
274 HNS3_RX_PTYPE_UNUSED_ENTRY(152),
275 HNS3_RX_PTYPE_UNUSED_ENTRY(153),
276 HNS3_RX_PTYPE_UNUSED_ENTRY(154),
277 HNS3_RX_PTYPE_UNUSED_ENTRY(155),
278 HNS3_RX_PTYPE_UNUSED_ENTRY(156),
279 HNS3_RX_PTYPE_UNUSED_ENTRY(157),
280 HNS3_RX_PTYPE_UNUSED_ENTRY(158),
281 HNS3_RX_PTYPE_UNUSED_ENTRY(159),
282 HNS3_RX_PTYPE_UNUSED_ENTRY(160),
283 HNS3_RX_PTYPE_UNUSED_ENTRY(161),
284 HNS3_RX_PTYPE_UNUSED_ENTRY(162),
285 HNS3_RX_PTYPE_UNUSED_ENTRY(163),
286 HNS3_RX_PTYPE_UNUSED_ENTRY(164),
287 HNS3_RX_PTYPE_UNUSED_ENTRY(165),
288 HNS3_RX_PTYPE_UNUSED_ENTRY(166),
289 HNS3_RX_PTYPE_UNUSED_ENTRY(167),
290 HNS3_RX_PTYPE_UNUSED_ENTRY(168),
291 HNS3_RX_PTYPE_UNUSED_ENTRY(169),
292 HNS3_RX_PTYPE_UNUSED_ENTRY(170),
293 HNS3_RX_PTYPE_UNUSED_ENTRY(171),
294 HNS3_RX_PTYPE_UNUSED_ENTRY(172),
295 HNS3_RX_PTYPE_UNUSED_ENTRY(173),
296 HNS3_RX_PTYPE_UNUSED_ENTRY(174),
297 HNS3_RX_PTYPE_UNUSED_ENTRY(175),
298 HNS3_RX_PTYPE_UNUSED_ENTRY(176),
299 HNS3_RX_PTYPE_UNUSED_ENTRY(177),
300 HNS3_RX_PTYPE_UNUSED_ENTRY(178),
301 HNS3_RX_PTYPE_UNUSED_ENTRY(179),
302 HNS3_RX_PTYPE_UNUSED_ENTRY(180),
303 HNS3_RX_PTYPE_UNUSED_ENTRY(181),
304 HNS3_RX_PTYPE_UNUSED_ENTRY(182),
305 HNS3_RX_PTYPE_UNUSED_ENTRY(183),
306 HNS3_RX_PTYPE_UNUSED_ENTRY(184),
307 HNS3_RX_PTYPE_UNUSED_ENTRY(185),
308 HNS3_RX_PTYPE_UNUSED_ENTRY(186),
309 HNS3_RX_PTYPE_UNUSED_ENTRY(187),
310 HNS3_RX_PTYPE_UNUSED_ENTRY(188),
311 HNS3_RX_PTYPE_UNUSED_ENTRY(189),
312 HNS3_RX_PTYPE_UNUSED_ENTRY(190),
313 HNS3_RX_PTYPE_UNUSED_ENTRY(191),
314 HNS3_RX_PTYPE_UNUSED_ENTRY(192),
315 HNS3_RX_PTYPE_UNUSED_ENTRY(193),
316 HNS3_RX_PTYPE_UNUSED_ENTRY(194),
317 HNS3_RX_PTYPE_UNUSED_ENTRY(195),
318 HNS3_RX_PTYPE_UNUSED_ENTRY(196),
319 HNS3_RX_PTYPE_UNUSED_ENTRY(197),
320 HNS3_RX_PTYPE_UNUSED_ENTRY(198),
321 HNS3_RX_PTYPE_UNUSED_ENTRY(199),
322 HNS3_RX_PTYPE_UNUSED_ENTRY(200),
323 HNS3_RX_PTYPE_UNUSED_ENTRY(201),
324 HNS3_RX_PTYPE_UNUSED_ENTRY(202),
325 HNS3_RX_PTYPE_UNUSED_ENTRY(203),
326 HNS3_RX_PTYPE_UNUSED_ENTRY(204),
327 HNS3_RX_PTYPE_UNUSED_ENTRY(205),
328 HNS3_RX_PTYPE_UNUSED_ENTRY(206),
329 HNS3_RX_PTYPE_UNUSED_ENTRY(207),
330 HNS3_RX_PTYPE_UNUSED_ENTRY(208),
331 HNS3_RX_PTYPE_UNUSED_ENTRY(209),
332 HNS3_RX_PTYPE_UNUSED_ENTRY(210),
333 HNS3_RX_PTYPE_UNUSED_ENTRY(211),
334 HNS3_RX_PTYPE_UNUSED_ENTRY(212),
335 HNS3_RX_PTYPE_UNUSED_ENTRY(213),
336 HNS3_RX_PTYPE_UNUSED_ENTRY(214),
337 HNS3_RX_PTYPE_UNUSED_ENTRY(215),
338 HNS3_RX_PTYPE_UNUSED_ENTRY(216),
339 HNS3_RX_PTYPE_UNUSED_ENTRY(217),
340 HNS3_RX_PTYPE_UNUSED_ENTRY(218),
341 HNS3_RX_PTYPE_UNUSED_ENTRY(219),
342 HNS3_RX_PTYPE_UNUSED_ENTRY(220),
343 HNS3_RX_PTYPE_UNUSED_ENTRY(221),
344 HNS3_RX_PTYPE_UNUSED_ENTRY(222),
345 HNS3_RX_PTYPE_UNUSED_ENTRY(223),
346 HNS3_RX_PTYPE_UNUSED_ENTRY(224),
347 HNS3_RX_PTYPE_UNUSED_ENTRY(225),
348 HNS3_RX_PTYPE_UNUSED_ENTRY(226),
349 HNS3_RX_PTYPE_UNUSED_ENTRY(227),
350 HNS3_RX_PTYPE_UNUSED_ENTRY(228),
351 HNS3_RX_PTYPE_UNUSED_ENTRY(229),
352 HNS3_RX_PTYPE_UNUSED_ENTRY(230),
353 HNS3_RX_PTYPE_UNUSED_ENTRY(231),
354 HNS3_RX_PTYPE_UNUSED_ENTRY(232),
355 HNS3_RX_PTYPE_UNUSED_ENTRY(233),
356 HNS3_RX_PTYPE_UNUSED_ENTRY(234),
357 HNS3_RX_PTYPE_UNUSED_ENTRY(235),
358 HNS3_RX_PTYPE_UNUSED_ENTRY(236),
359 HNS3_RX_PTYPE_UNUSED_ENTRY(237),
360 HNS3_RX_PTYPE_UNUSED_ENTRY(238),
361 HNS3_RX_PTYPE_UNUSED_ENTRY(239),
362 HNS3_RX_PTYPE_UNUSED_ENTRY(240),
363 HNS3_RX_PTYPE_UNUSED_ENTRY(241),
364 HNS3_RX_PTYPE_UNUSED_ENTRY(242),
365 HNS3_RX_PTYPE_UNUSED_ENTRY(243),
366 HNS3_RX_PTYPE_UNUSED_ENTRY(244),
367 HNS3_RX_PTYPE_UNUSED_ENTRY(245),
368 HNS3_RX_PTYPE_UNUSED_ENTRY(246),
369 HNS3_RX_PTYPE_UNUSED_ENTRY(247),
370 HNS3_RX_PTYPE_UNUSED_ENTRY(248),
371 HNS3_RX_PTYPE_UNUSED_ENTRY(249),
372 HNS3_RX_PTYPE_UNUSED_ENTRY(250),
373 HNS3_RX_PTYPE_UNUSED_ENTRY(251),
374 HNS3_RX_PTYPE_UNUSED_ENTRY(252),
375 HNS3_RX_PTYPE_UNUSED_ENTRY(253),
376 HNS3_RX_PTYPE_UNUSED_ENTRY(254),
	HNS3_RX_PTYPE_UNUSED_ENTRY(255),
};
#define HNS3_INVALID_PTYPE \
		ARRAY_SIZE(hns3_rx_ptype_tbl)

static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
	struct hns3_enet_tqp_vector *tqp_vector = vector;

	napi_schedule_irqoff(&tqp_vector->napi);
	tqp_vector->event_cnt++;

	return IRQ_HANDLED;
}
static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	unsigned int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
			continue;

		/* clear the affinity mask */
		irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}
static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "TxRx", txrx_int_idx++);
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "Rx", rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "Tx", tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN);
		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name, tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			hns3_nic_uninit_irq(priv);
			return ret;
		}

		irq_set_affinity_hint(tqp_vectors->vector_irq,
				      &tqp_vectors->affinity_mask);

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}
static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}

static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);
	enable_irq(tqp_vector->vector_irq);

	/* enable vector */
	hns3_mask_vector_irq(tqp_vector, 1);
}

static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	/* disable vector */
	hns3_mask_vector_irq(tqp_vector, 0);

	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
	cancel_work_sync(&tqp_vector->rx_group.dim.work);
	cancel_work_sync(&tqp_vector->tx_group.dim.work);
}
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value)
{
	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

	/* This defines the configuration for RL (Interrupt Rate Limiter).
	 * RL defines the rate of interrupts, i.e. the number of interrupts
	 * per second. GL and RL are two ways to achieve interrupt coalescing.
	 */
	if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable &&
	    !tqp_vector->rx_group.coal.adapt_enable)
		/* According to the hardware, the range of rl_reg is
		 * 0-59 and the unit is 4.
		 */
		rl_reg |= HNS3_INT_RL_ENABLE_MASK;

	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 new_val;

	if (tqp_vector->rx_group.coal.unit_1us)
		new_val = gl_value | HNS3_INT_GL_1US;
	else
		new_val = hns3_gl_usec_to_reg(gl_value);

	writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}

void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 new_val;

	if (tqp_vector->tx_group.coal.unit_1us)
		new_val = gl_value | HNS3_INT_GL_1US;
	else
		new_val = hns3_gl_usec_to_reg(gl_value);

	writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}

void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value)
{
	writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET);
}

void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value)
{
	writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET);
}
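/* The three coalesce knobs written above serve different purposes: GL is a
 * time based gap limit (optionally in 1us units on newer devices), QL is a
 * per-vector quantity limit that only exists when dev_specs.int_ql_max is
 * non-zero (see hns3_vector_coalesce_init() below), and RL is an interrupt
 * rate limiter which is presumably only useful when adaptive (DIM)
 * moderation is disabled for both the TX and RX group of the vector.
 */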
553 static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector
*tqp_vector
,
554 struct hns3_nic_priv
*priv
)
556 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(priv
->ae_handle
->pdev
);
557 struct hns3_enet_coalesce
*tx_coal
= &tqp_vector
->tx_group
.coal
;
558 struct hns3_enet_coalesce
*rx_coal
= &tqp_vector
->rx_group
.coal
;
559 struct hns3_enet_coalesce
*ptx_coal
= &priv
->tx_coal
;
560 struct hns3_enet_coalesce
*prx_coal
= &priv
->rx_coal
;
562 tx_coal
->adapt_enable
= ptx_coal
->adapt_enable
;
563 rx_coal
->adapt_enable
= prx_coal
->adapt_enable
;
565 tx_coal
->int_gl
= ptx_coal
->int_gl
;
566 rx_coal
->int_gl
= prx_coal
->int_gl
;
568 rx_coal
->flow_level
= prx_coal
->flow_level
;
569 tx_coal
->flow_level
= ptx_coal
->flow_level
;
571 /* device version above V3(include V3), GL can configure 1us
572 * unit, so uses 1us unit.
574 if (ae_dev
->dev_version
>= HNAE3_DEVICE_VERSION_V3
) {
575 tx_coal
->unit_1us
= 1;
576 rx_coal
->unit_1us
= 1;
579 if (ae_dev
->dev_specs
.int_ql_max
) {
580 tx_coal
->ql_enable
= 1;
581 rx_coal
->ql_enable
= 1;
582 tx_coal
->int_ql_max
= ae_dev
->dev_specs
.int_ql_max
;
583 rx_coal
->int_ql_max
= ae_dev
->dev_specs
.int_ql_max
;
584 tx_coal
->int_ql
= ptx_coal
->int_ql
;
585 rx_coal
->int_ql
= prx_coal
->int_ql
;
590 hns3_vector_coalesce_init_hw(struct hns3_enet_tqp_vector
*tqp_vector
,
591 struct hns3_nic_priv
*priv
)
593 struct hns3_enet_coalesce
*tx_coal
= &tqp_vector
->tx_group
.coal
;
594 struct hns3_enet_coalesce
*rx_coal
= &tqp_vector
->rx_group
.coal
;
595 struct hnae3_handle
*h
= priv
->ae_handle
;
597 hns3_set_vector_coalesce_tx_gl(tqp_vector
, tx_coal
->int_gl
);
598 hns3_set_vector_coalesce_rx_gl(tqp_vector
, rx_coal
->int_gl
);
599 hns3_set_vector_coalesce_rl(tqp_vector
, h
->kinfo
.int_rl_setting
);
601 if (tx_coal
->ql_enable
)
602 hns3_set_vector_coalesce_tx_ql(tqp_vector
, tx_coal
->int_ql
);
604 if (rx_coal
->ql_enable
)
605 hns3_set_vector_coalesce_rx_ql(tqp_vector
, rx_coal
->int_ql
);
608 static int hns3_nic_set_real_num_queue(struct net_device
*netdev
)
610 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
611 struct hnae3_knic_private_info
*kinfo
= &h
->kinfo
;
612 struct hnae3_tc_info
*tc_info
= &kinfo
->tc_info
;
613 unsigned int queue_size
= kinfo
->num_tqps
;
616 if (tc_info
->num_tc
<= 1 && !tc_info
->mqprio_active
) {
617 netdev_reset_tc(netdev
);
619 ret
= netdev_set_num_tc(netdev
, tc_info
->num_tc
);
622 "netdev_set_num_tc fail, ret=%d!\n", ret
);
626 for (i
= 0; i
< tc_info
->num_tc
; i
++)
627 netdev_set_tc_queue(netdev
, i
, tc_info
->tqp_count
[i
],
628 tc_info
->tqp_offset
[i
]);
631 ret
= netif_set_real_num_tx_queues(netdev
, queue_size
);
634 "netif_set_real_num_tx_queues fail, ret=%d!\n", ret
);
638 ret
= netif_set_real_num_rx_queues(netdev
, queue_size
);
641 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret
);
648 u16
hns3_get_max_available_channels(struct hnae3_handle
*h
)
650 u16 alloc_tqps
, max_rss_size
, rss_size
;
652 h
->ae_algo
->ops
->get_tqps_and_rss_info(h
, &alloc_tqps
, &max_rss_size
);
653 rss_size
= alloc_tqps
/ h
->kinfo
.tc_info
.num_tc
;
655 return min_t(u16
, rss_size
, max_rss_size
);
658 static void hns3_tqp_enable(struct hnae3_queue
*tqp
)
662 rcb_reg
= hns3_read_dev(tqp
, HNS3_RING_EN_REG
);
663 rcb_reg
|= BIT(HNS3_RING_EN_B
);
664 hns3_write_dev(tqp
, HNS3_RING_EN_REG
, rcb_reg
);
667 static void hns3_tqp_disable(struct hnae3_queue
*tqp
)
671 rcb_reg
= hns3_read_dev(tqp
, HNS3_RING_EN_REG
);
672 rcb_reg
&= ~BIT(HNS3_RING_EN_B
);
673 hns3_write_dev(tqp
, HNS3_RING_EN_REG
, rcb_reg
);
676 static void hns3_free_rx_cpu_rmap(struct net_device
*netdev
)
678 #ifdef CONFIG_RFS_ACCEL
679 free_irq_cpu_rmap(netdev
->rx_cpu_rmap
);
680 netdev
->rx_cpu_rmap
= NULL
;
684 static int hns3_set_rx_cpu_rmap(struct net_device
*netdev
)
686 #ifdef CONFIG_RFS_ACCEL
687 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
688 struct hns3_enet_tqp_vector
*tqp_vector
;
691 if (!netdev
->rx_cpu_rmap
) {
692 netdev
->rx_cpu_rmap
= alloc_irq_cpu_rmap(priv
->vector_num
);
693 if (!netdev
->rx_cpu_rmap
)
697 for (i
= 0; i
< priv
->vector_num
; i
++) {
698 tqp_vector
= &priv
->tqp_vector
[i
];
699 ret
= irq_cpu_rmap_add(netdev
->rx_cpu_rmap
,
700 tqp_vector
->vector_irq
);
702 hns3_free_rx_cpu_rmap(netdev
);
710 static int hns3_nic_net_up(struct net_device
*netdev
)
712 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
713 struct hnae3_handle
*h
= priv
->ae_handle
;
717 ret
= hns3_nic_reset_all_ring(h
);
721 clear_bit(HNS3_NIC_STATE_DOWN
, &priv
->state
);
723 /* enable the vectors */
724 for (i
= 0; i
< priv
->vector_num
; i
++)
725 hns3_vector_enable(&priv
->tqp_vector
[i
]);
728 for (j
= 0; j
< h
->kinfo
.num_tqps
; j
++)
729 hns3_tqp_enable(h
->kinfo
.tqp
[j
]);
731 /* start the ae_dev */
732 ret
= h
->ae_algo
->ops
->start
? h
->ae_algo
->ops
->start(h
) : 0;
734 set_bit(HNS3_NIC_STATE_DOWN
, &priv
->state
);
736 hns3_tqp_disable(h
->kinfo
.tqp
[j
]);
738 for (j
= i
- 1; j
>= 0; j
--)
739 hns3_vector_disable(&priv
->tqp_vector
[j
]);
745 static void hns3_config_xps(struct hns3_nic_priv
*priv
)
749 for (i
= 0; i
< priv
->vector_num
; i
++) {
750 struct hns3_enet_tqp_vector
*tqp_vector
= &priv
->tqp_vector
[i
];
751 struct hns3_enet_ring
*ring
= tqp_vector
->tx_group
.ring
;
756 ret
= netif_set_xps_queue(priv
->netdev
,
757 &tqp_vector
->affinity_mask
,
758 ring
->tqp
->tqp_index
);
760 netdev_warn(priv
->netdev
,
761 "set xps queue failed: %d", ret
);
768 static int hns3_nic_net_open(struct net_device
*netdev
)
770 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
771 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
772 struct hnae3_knic_private_info
*kinfo
;
775 if (hns3_nic_resetting(netdev
))
778 if (!test_bit(HNS3_NIC_STATE_DOWN
, &priv
->state
)) {
779 netdev_warn(netdev
, "net open repeatedly!\n");
783 netif_carrier_off(netdev
);
785 ret
= hns3_nic_set_real_num_queue(netdev
);
789 ret
= hns3_nic_net_up(netdev
);
791 netdev_err(netdev
, "net up fail, ret=%d!\n", ret
);
796 for (i
= 0; i
< HNAE3_MAX_USER_PRIO
; i
++)
797 netdev_set_prio_tc_map(netdev
, i
, kinfo
->tc_info
.prio_tc
[i
]);
799 if (h
->ae_algo
->ops
->set_timer_task
)
800 h
->ae_algo
->ops
->set_timer_task(priv
->ae_handle
, true);
802 hns3_config_xps(priv
);
804 netif_dbg(h
, drv
, netdev
, "net open\n");
809 static void hns3_reset_tx_queue(struct hnae3_handle
*h
)
811 struct net_device
*ndev
= h
->kinfo
.netdev
;
812 struct hns3_nic_priv
*priv
= netdev_priv(ndev
);
813 struct netdev_queue
*dev_queue
;
816 for (i
= 0; i
< h
->kinfo
.num_tqps
; i
++) {
817 dev_queue
= netdev_get_tx_queue(ndev
,
818 priv
->ring
[i
].queue_index
);
819 netdev_tx_reset_queue(dev_queue
);
823 static void hns3_nic_net_down(struct net_device
*netdev
)
825 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
826 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
827 const struct hnae3_ae_ops
*ops
;
830 /* disable vectors */
831 for (i
= 0; i
< priv
->vector_num
; i
++)
832 hns3_vector_disable(&priv
->tqp_vector
[i
]);
835 for (i
= 0; i
< h
->kinfo
.num_tqps
; i
++)
836 hns3_tqp_disable(h
->kinfo
.tqp
[i
]);
839 ops
= priv
->ae_handle
->ae_algo
->ops
;
841 ops
->stop(priv
->ae_handle
);
843 /* delay ring buffer clearing to hns3_reset_notify_uninit_enet
844 * during reset process, because driver may not be able
845 * to disable the ring through firmware when downing the netdev.
847 if (!hns3_nic_resetting(netdev
))
848 hns3_clear_all_ring(priv
->ae_handle
, false);
850 hns3_reset_tx_queue(priv
->ae_handle
);
853 static int hns3_nic_net_stop(struct net_device
*netdev
)
855 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
856 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
858 if (test_and_set_bit(HNS3_NIC_STATE_DOWN
, &priv
->state
))
861 netif_dbg(h
, drv
, netdev
, "net stop\n");
863 if (h
->ae_algo
->ops
->set_timer_task
)
864 h
->ae_algo
->ops
->set_timer_task(priv
->ae_handle
, false);
866 netif_carrier_off(netdev
);
867 netif_tx_disable(netdev
);
869 hns3_nic_net_down(netdev
);
874 static int hns3_nic_uc_sync(struct net_device
*netdev
,
875 const unsigned char *addr
)
877 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
879 if (h
->ae_algo
->ops
->add_uc_addr
)
880 return h
->ae_algo
->ops
->add_uc_addr(h
, addr
);
885 static int hns3_nic_uc_unsync(struct net_device
*netdev
,
886 const unsigned char *addr
)
888 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
890 /* need ignore the request of removing device address, because
891 * we store the device address and other addresses of uc list
892 * in the function's mac filter list.
894 if (ether_addr_equal(addr
, netdev
->dev_addr
))
897 if (h
->ae_algo
->ops
->rm_uc_addr
)
898 return h
->ae_algo
->ops
->rm_uc_addr(h
, addr
);
903 static int hns3_nic_mc_sync(struct net_device
*netdev
,
904 const unsigned char *addr
)
906 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
908 if (h
->ae_algo
->ops
->add_mc_addr
)
909 return h
->ae_algo
->ops
->add_mc_addr(h
, addr
);
914 static int hns3_nic_mc_unsync(struct net_device
*netdev
,
915 const unsigned char *addr
)
917 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
919 if (h
->ae_algo
->ops
->rm_mc_addr
)
920 return h
->ae_algo
->ops
->rm_mc_addr(h
, addr
);
static u8 hns3_get_netdev_flags(struct net_device *netdev)
{
	u8 flags = 0;

	if (netdev->flags & IFF_PROMISC)
		flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
	else if (netdev->flags & IFF_ALLMULTI)
		flags = HNAE3_USER_MPE;

	return flags;
}

static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	u8 new_flags;

	new_flags = hns3_get_netdev_flags(netdev);

	__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
	__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync);

	/* When user mode promiscuous mode is enabled, VLAN filtering is
	 * disabled to let all packets in.
	 */
	h->netdev_flags = new_flags;
	hns3_request_update_promisc_mode(h);
}

void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	if (ops->request_update_promisc_mode)
		ops->request_update_promisc_mode(handle);
}
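/* Note: hns3_request_update_promisc_mode() only requests that the ae_dev
 * layer refresh the promiscuous configuration; the actual hardware update
 * is expected to be applied later by the lower-level driver rather than
 * synchronously in ndo_set_rx_mode() context.
 */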
static u32 hns3_tx_spare_space(struct hns3_enet_ring *ring)
{
	struct hns3_tx_spare *tx_spare = ring->tx_spare;
	u32 ntc, ntu;

	/* This smp_load_acquire() pairs with smp_store_release() in
	 * hns3_tx_spare_update() called in tx desc cleaning process.
	 */
	ntc = smp_load_acquire(&tx_spare->last_to_clean);
	ntu = tx_spare->next_to_use;

	if (ntc > ntu)
		return ntc - ntu - 1;

	/* The free tx buffer is divided into two parts, so pick the
	 * larger one.
	 */
	return max(ntc, tx_spare->len - ntu) - 1;
}
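/* Worked example: with len = 4096, next_to_use = 1024 and last_to_clean =
 * 3072 the free area is contiguous and the function returns
 * 3072 - 1024 - 1 = 2047 bytes.  With next_to_use = 3072 and
 * last_to_clean = 1024 the free area wraps into [3072, 4096) and [0, 1024),
 * so the larger piece is returned: max(1024, 4096 - 3072) - 1 = 1023 bytes.
 */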
static void hns3_tx_spare_update(struct hns3_enet_ring *ring)
{
	struct hns3_tx_spare *tx_spare = ring->tx_spare;

	if (!tx_spare ||
	    tx_spare->last_to_clean == tx_spare->next_to_clean)
		return;

	/* This smp_store_release() pairs with smp_load_acquire() in
	 * hns3_tx_spare_space() called in xmit process.
	 */
	smp_store_release(&tx_spare->last_to_clean,
			  tx_spare->next_to_clean);
}
static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring,
				   struct sk_buff *skb,
				   u32 space)
{
	u32 len = skb->len <= ring->tx_copybreak ? skb->len :
				skb_headlen(skb);

	if (len > ring->tx_copybreak)
		return false;

	if (ALIGN(len, dma_get_cache_alignment()) > space) {
		hns3_ring_stats_update(ring, tx_spare_full);
		return false;
	}

	return true;
}

static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
				struct sk_buff *skb,
				u32 space)
{
	if (skb->len <= ring->tx_copybreak || !tx_sgl ||
	    (!skb_has_frag_list(skb) &&
	     skb_shinfo(skb)->nr_frags < tx_sgl))
		return false;

	if (space < HNS3_MAX_SGL_SIZE) {
		hns3_ring_stats_update(ring, tx_spare_full);
		return false;
	}

	return true;
}
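/* The two helpers above choose between the tx spare strategies: a small
 * skb (len <= ring->tx_copybreak) is copied into the spare area as a
 * bounce buffer, while a large skb with at least tx_sgl fragments (module
 * parameter above) is mapped through dma_map_sg() into the same area.
 * When either check fails the caller presumably falls back to the normal
 * per-fragment DMA mapping path.
 */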
1032 static void hns3_init_tx_spare_buffer(struct hns3_enet_ring
*ring
)
1034 u32 alloc_size
= ring
->tqp
->handle
->kinfo
.tx_spare_buf_size
;
1035 struct hns3_tx_spare
*tx_spare
;
1043 order
= get_order(alloc_size
);
1044 if (order
> MAX_ORDER
) {
1045 if (net_ratelimit())
1046 dev_warn(ring_to_dev(ring
), "failed to allocate tx spare buffer, exceed to max order\n");
1050 tx_spare
= devm_kzalloc(ring_to_dev(ring
), sizeof(*tx_spare
),
1053 /* The driver still work without the tx spare buffer */
1054 dev_warn(ring_to_dev(ring
), "failed to allocate hns3_tx_spare\n");
1055 goto devm_kzalloc_error
;
1058 page
= alloc_pages_node(dev_to_node(ring_to_dev(ring
)),
1061 dev_warn(ring_to_dev(ring
), "failed to allocate tx spare pages\n");
1062 goto alloc_pages_error
;
1065 dma
= dma_map_page(ring_to_dev(ring
), page
, 0,
1066 PAGE_SIZE
<< order
, DMA_TO_DEVICE
);
1067 if (dma_mapping_error(ring_to_dev(ring
), dma
)) {
1068 dev_warn(ring_to_dev(ring
), "failed to map pages for tx spare\n");
1069 goto dma_mapping_error
;
1072 tx_spare
->dma
= dma
;
1073 tx_spare
->buf
= page_address(page
);
1074 tx_spare
->len
= PAGE_SIZE
<< order
;
1075 ring
->tx_spare
= tx_spare
;
1081 devm_kfree(ring_to_dev(ring
), tx_spare
);
1083 ring
->tqp
->handle
->kinfo
.tx_spare_buf_size
= 0;
1086 /* Use hns3_tx_spare_space() to make sure there is enough buffer
1087 * before calling below function to allocate tx buffer.
1089 static void *hns3_tx_spare_alloc(struct hns3_enet_ring
*ring
,
1090 unsigned int size
, dma_addr_t
*dma
,
1093 struct hns3_tx_spare
*tx_spare
= ring
->tx_spare
;
1094 u32 ntu
= tx_spare
->next_to_use
;
1096 size
= ALIGN(size
, dma_get_cache_alignment());
1099 /* Tx spare buffer wraps back here because the end of
1100 * freed tx buffer is not enough.
1102 if (ntu
+ size
> tx_spare
->len
) {
1103 *cb_len
+= (tx_spare
->len
- ntu
);
1107 tx_spare
->next_to_use
= ntu
+ size
;
1108 if (tx_spare
->next_to_use
== tx_spare
->len
)
1109 tx_spare
->next_to_use
= 0;
1111 *dma
= tx_spare
->dma
+ ntu
;
1113 return tx_spare
->buf
+ ntu
;
1116 static void hns3_tx_spare_rollback(struct hns3_enet_ring
*ring
, u32 len
)
1118 struct hns3_tx_spare
*tx_spare
= ring
->tx_spare
;
1120 if (len
> tx_spare
->next_to_use
) {
1121 len
-= tx_spare
->next_to_use
;
1122 tx_spare
->next_to_use
= tx_spare
->len
- len
;
1124 tx_spare
->next_to_use
-= len
;
1128 static void hns3_tx_spare_reclaim_cb(struct hns3_enet_ring
*ring
,
1129 struct hns3_desc_cb
*cb
)
1131 struct hns3_tx_spare
*tx_spare
= ring
->tx_spare
;
1132 u32 ntc
= tx_spare
->next_to_clean
;
1133 u32 len
= cb
->length
;
1135 tx_spare
->next_to_clean
+= len
;
1137 if (tx_spare
->next_to_clean
>= tx_spare
->len
) {
1138 tx_spare
->next_to_clean
-= tx_spare
->len
;
1140 if (tx_spare
->next_to_clean
) {
1142 len
= tx_spare
->next_to_clean
;
1146 /* This tx spare buffer is only really reclaimed after calling
1147 * hns3_tx_spare_update(), so it is still safe to use the info in
1148 * the tx buffer to do the dma sync or sg unmapping after
1149 * tx_spare->next_to_clean is moved forword.
1151 if (cb
->type
& (DESC_TYPE_BOUNCE_HEAD
| DESC_TYPE_BOUNCE_ALL
)) {
1152 dma_addr_t dma
= tx_spare
->dma
+ ntc
;
1154 dma_sync_single_for_cpu(ring_to_dev(ring
), dma
, len
,
1157 struct sg_table
*sgt
= tx_spare
->buf
+ ntc
;
1159 dma_unmap_sg(ring_to_dev(ring
), sgt
->sgl
, sgt
->orig_nents
,
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs,
			u16 *mss, u32 *type_cs_vlan_tso, u32 *send_bytes)
{
	u32 l4_offset, hdr_len;
	union l3_hdr_info l3;
	union l4_hdr_info l4;
	u32 l4_paylen;
	int ret;

	if (!skb_is_gso(skb))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (unlikely(ret < 0))
		return ret;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* Software should clear the IPv4's checksum field when tso is
	 * needed.
	 */
	if (l3.v4->version == 4)
		l3.v4->check = 0;

	/* tunnel packet */
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		/* reset l3&l4 pointers from outer to inner headers */
		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* Software should clear the IPv4's checksum field when
		 * tso is needed.
		 */
		if (l3.v4->version == 4)
			l3.v4->check = 0;
	}

	/* normal or tunnel packet */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner pseudo checksum when tso */
	l4_paylen = skb->len - l4_offset;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		hdr_len = sizeof(*l4.udp) + l4_offset;
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(l4_paylen));
	} else {
		hdr_len = (l4.tcp->doff << 2) + l4_offset;
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(l4_paylen));
	}

	*send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len;

	/* find the txbd field values */
	*paylen_fdop_ol4cs = skb->len - hdr_len;
	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);

	/* offload outer UDP header checksum */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
		hns3_set_field(*paylen_fdop_ol4cs, HNS3_TXD_OL4CS_B, 1);

	/* get MSS for TSO */
	*mss = skb_shinfo(skb)->gso_size;

	trace_hns3_tso(skb);

	return 0;
}
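/* Worked example: for a TCP GSO skb with skb->len = 66000, l4_offset = 66
 * and a 20 byte TCP header (doff = 5), hdr_len = 20 + 66 = 86 and
 * l4_paylen = 66000 - 66 = 65934.  csum_replace_by_diff() removes that
 * payload length from the inner pseudo-header checksum so the hardware can
 * add the real per-segment length back, and *paylen_fdop_ol4cs is set to
 * 66000 - 86 = 65914.
 */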
1239 static int hns3_get_l4_protocol(struct sk_buff
*skb
, u8
*ol4_proto
,
1242 union l3_hdr_info l3
;
1243 unsigned char *l4_hdr
;
1244 unsigned char *exthdr
;
1248 /* find outer header point */
1249 l3
.hdr
= skb_network_header(skb
);
1250 l4_hdr
= skb_transport_header(skb
);
1252 if (skb
->protocol
== htons(ETH_P_IPV6
)) {
1253 exthdr
= l3
.hdr
+ sizeof(*l3
.v6
);
1254 l4_proto_tmp
= l3
.v6
->nexthdr
;
1255 if (l4_hdr
!= exthdr
)
1256 ipv6_skip_exthdr(skb
, exthdr
- skb
->data
,
1257 &l4_proto_tmp
, &frag_off
);
1258 } else if (skb
->protocol
== htons(ETH_P_IP
)) {
1259 l4_proto_tmp
= l3
.v4
->protocol
;
1264 *ol4_proto
= l4_proto_tmp
;
1267 if (!skb
->encapsulation
) {
1272 /* find inner header point */
1273 l3
.hdr
= skb_inner_network_header(skb
);
1274 l4_hdr
= skb_inner_transport_header(skb
);
1276 if (l3
.v6
->version
== 6) {
1277 exthdr
= l3
.hdr
+ sizeof(*l3
.v6
);
1278 l4_proto_tmp
= l3
.v6
->nexthdr
;
1279 if (l4_hdr
!= exthdr
)
1280 ipv6_skip_exthdr(skb
, exthdr
- skb
->data
,
1281 &l4_proto_tmp
, &frag_off
);
1282 } else if (l3
.v4
->version
== 4) {
1283 l4_proto_tmp
= l3
.v4
->protocol
;
1286 *il4_proto
= l4_proto_tmp
;
/* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL and it
 * is a UDP packet with an IANA-assigned dest port, the hardware is
 * expected to do the checksum offload, but it will not do the checksum
 * offload when the UDP dest port is 4789, 4790 or 6081.
 */
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
	struct hns3_nic_priv *priv = netdev_priv(skb->dev);
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
	union l4_hdr_info l4;

	/* On device version V3 and above, the hardware can do this
	 * checksum offload.
	 */
	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		return false;

	l4.hdr = skb_transport_header(skb);

	if (!(!skb->encapsulation &&
	      (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
	       l4.udp->dest == htons(GENEVE_UDP_PORT) ||
	       l4.udp->dest == htons(IANA_VXLAN_GPE_UDP_PORT))))
		return false;

	return true;
}
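/* Note: when this quirk applies, the caller is expected to fall back to
 * software checksumming (see the skb_put_padto() + skb_checksum_help()
 * path in hns3_set_l4_csum_length() below) instead of requesting the
 * broken hardware offload.
 */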
1320 static void hns3_set_outer_l2l3l4(struct sk_buff
*skb
, u8 ol4_proto
,
1321 u32
*ol_type_vlan_len_msec
)
1323 u32 l2_len
, l3_len
, l4_len
;
1324 unsigned char *il2_hdr
;
1325 union l3_hdr_info l3
;
1326 union l4_hdr_info l4
;
1328 l3
.hdr
= skb_network_header(skb
);
1329 l4
.hdr
= skb_transport_header(skb
);
1331 /* compute OL2 header size, defined in 2 Bytes */
1332 l2_len
= l3
.hdr
- skb
->data
;
1333 hns3_set_field(*ol_type_vlan_len_msec
, HNS3_TXD_L2LEN_S
, l2_len
>> 1);
1335 /* compute OL3 header size, defined in 4 Bytes */
1336 l3_len
= l4
.hdr
- l3
.hdr
;
1337 hns3_set_field(*ol_type_vlan_len_msec
, HNS3_TXD_L3LEN_S
, l3_len
>> 2);
1339 il2_hdr
= skb_inner_mac_header(skb
);
1340 /* compute OL4 header size, defined in 4 Bytes */
1341 l4_len
= il2_hdr
- l4
.hdr
;
1342 hns3_set_field(*ol_type_vlan_len_msec
, HNS3_TXD_L4LEN_S
, l4_len
>> 2);
1344 /* define outer network header type */
1345 if (skb
->protocol
== htons(ETH_P_IP
)) {
1346 if (skb_is_gso(skb
))
1347 hns3_set_field(*ol_type_vlan_len_msec
,
1349 HNS3_OL3T_IPV4_CSUM
);
1351 hns3_set_field(*ol_type_vlan_len_msec
,
1353 HNS3_OL3T_IPV4_NO_CSUM
);
1354 } else if (skb
->protocol
== htons(ETH_P_IPV6
)) {
1355 hns3_set_field(*ol_type_vlan_len_msec
, HNS3_TXD_OL3T_S
,
1359 if (ol4_proto
== IPPROTO_UDP
)
1360 hns3_set_field(*ol_type_vlan_len_msec
, HNS3_TXD_TUNTYPE_S
,
1361 HNS3_TUN_MAC_IN_UDP
);
1362 else if (ol4_proto
== IPPROTO_GRE
)
1363 hns3_set_field(*ol_type_vlan_len_msec
, HNS3_TXD_TUNTYPE_S
,
1367 static void hns3_set_l3_type(struct sk_buff
*skb
, union l3_hdr_info l3
,
1368 u32
*type_cs_vlan_tso
)
1370 if (l3
.v4
->version
== 4) {
1371 hns3_set_field(*type_cs_vlan_tso
, HNS3_TXD_L3T_S
,
1374 /* the stack computes the IP header already, the only time we
1375 * need the hardware to recompute it is in the case of TSO.
1377 if (skb_is_gso(skb
))
1378 hns3_set_field(*type_cs_vlan_tso
, HNS3_TXD_L3CS_B
, 1);
1379 } else if (l3
.v6
->version
== 6) {
1380 hns3_set_field(*type_cs_vlan_tso
, HNS3_TXD_L3T_S
,
1385 static int hns3_set_l4_csum_length(struct sk_buff
*skb
, union l4_hdr_info l4
,
1386 u32 l4_proto
, u32
*type_cs_vlan_tso
)
1388 /* compute inner(/normal) L4 header size, defined in 4 Bytes */
1391 hns3_set_field(*type_cs_vlan_tso
, HNS3_TXD_L4CS_B
, 1);
1392 hns3_set_field(*type_cs_vlan_tso
, HNS3_TXD_L4T_S
,
1394 hns3_set_field(*type_cs_vlan_tso
, HNS3_TXD_L4LEN_S
,
1398 if (hns3_tunnel_csum_bug(skb
)) {
1399 int ret
= skb_put_padto(skb
, HNS3_MIN_TUN_PKT_LEN
);
1401 return ret
? ret
: skb_checksum_help(skb
);
1404 hns3_set_field(*type_cs_vlan_tso
, HNS3_TXD_L4CS_B
, 1);
1405 hns3_set_field(*type_cs_vlan_tso
, HNS3_TXD_L4T_S
,
1407 hns3_set_field(*type_cs_vlan_tso
, HNS3_TXD_L4LEN_S
,
1408 (sizeof(struct udphdr
) >> 2));
1411 hns3_set_field(*type_cs_vlan_tso
, HNS3_TXD_L4CS_B
, 1);
1412 hns3_set_field(*type_cs_vlan_tso
, HNS3_TXD_L4T_S
,
1414 hns3_set_field(*type_cs_vlan_tso
, HNS3_TXD_L4LEN_S
,
1415 (sizeof(struct sctphdr
) >> 2));
1418 /* drop the skb tunnel packet if hardware don't support,
1419 * because hardware can't calculate csum when TSO.
1421 if (skb_is_gso(skb
))
1424 /* the stack computes the IP header already,
1425 * driver calculate l4 checksum when not TSO.
1427 return skb_checksum_help(skb
);
1433 static int hns3_set_l2l3l4(struct sk_buff
*skb
, u8 ol4_proto
,
1434 u8 il4_proto
, u32
*type_cs_vlan_tso
,
1435 u32
*ol_type_vlan_len_msec
)
1437 unsigned char *l2_hdr
= skb
->data
;
1438 u32 l4_proto
= ol4_proto
;
1439 union l4_hdr_info l4
;
1440 union l3_hdr_info l3
;
1443 l4
.hdr
= skb_transport_header(skb
);
1444 l3
.hdr
= skb_network_header(skb
);
1446 /* handle encapsulation skb */
1447 if (skb
->encapsulation
) {
1448 /* If this is a not UDP/GRE encapsulation skb */
1449 if (!(ol4_proto
== IPPROTO_UDP
|| ol4_proto
== IPPROTO_GRE
)) {
1450 /* drop the skb tunnel packet if hardware don't support,
1451 * because hardware can't calculate csum when TSO.
1453 if (skb_is_gso(skb
))
1456 /* the stack computes the IP header already,
1457 * driver calculate l4 checksum when not TSO.
1459 return skb_checksum_help(skb
);
1462 hns3_set_outer_l2l3l4(skb
, ol4_proto
, ol_type_vlan_len_msec
);
1464 /* switch to inner header */
1465 l2_hdr
= skb_inner_mac_header(skb
);
1466 l3
.hdr
= skb_inner_network_header(skb
);
1467 l4
.hdr
= skb_inner_transport_header(skb
);
1468 l4_proto
= il4_proto
;
1471 hns3_set_l3_type(skb
, l3
, type_cs_vlan_tso
);
1473 /* compute inner(/normal) L2 header size, defined in 2 Bytes */
1474 l2_len
= l3
.hdr
- l2_hdr
;
1475 hns3_set_field(*type_cs_vlan_tso
, HNS3_TXD_L2LEN_S
, l2_len
>> 1);
1477 /* compute inner(/normal) L3 header size, defined in 4 Bytes */
1478 l3_len
= l4
.hdr
- l3
.hdr
;
1479 hns3_set_field(*type_cs_vlan_tso
, HNS3_TXD_L3LEN_S
, l3_len
>> 2);
1481 return hns3_set_l4_csum_length(skb
, l4
, l4_proto
, type_cs_vlan_tso
);
1484 static int hns3_handle_vtags(struct hns3_enet_ring
*tx_ring
,
1485 struct sk_buff
*skb
)
1487 struct hnae3_handle
*handle
= tx_ring
->tqp
->handle
;
1488 struct hnae3_ae_dev
*ae_dev
;
1489 struct vlan_ethhdr
*vhdr
;
1492 if (!(skb
->protocol
== htons(ETH_P_8021Q
) ||
1493 skb_vlan_tag_present(skb
)))
1496 /* For HW limitation on HNAE3_DEVICE_VERSION_V2, if port based insert
1497 * VLAN enabled, only one VLAN header is allowed in skb, otherwise it
1498 * will cause RAS error.
1500 ae_dev
= pci_get_drvdata(handle
->pdev
);
1501 if (unlikely(skb_vlan_tagged_multi(skb
) &&
1502 ae_dev
->dev_version
<= HNAE3_DEVICE_VERSION_V2
&&
1503 handle
->port_base_vlan_state
==
1504 HNAE3_PORT_BASE_VLAN_ENABLE
))
1507 if (skb
->protocol
== htons(ETH_P_8021Q
) &&
1508 !(handle
->kinfo
.netdev
->features
& NETIF_F_HW_VLAN_CTAG_TX
)) {
1509 /* When HW VLAN acceleration is turned off, and the stack
1510 * sets the protocol to 802.1q, the driver just need to
1511 * set the protocol to the encapsulated ethertype.
1513 skb
->protocol
= vlan_get_protocol(skb
);
1517 if (skb_vlan_tag_present(skb
)) {
1518 /* Based on hw strategy, use out_vtag in two layer tag case,
1519 * and use inner_vtag in one tag case.
1521 if (skb
->protocol
== htons(ETH_P_8021Q
) &&
1522 handle
->port_base_vlan_state
==
1523 HNAE3_PORT_BASE_VLAN_DISABLE
)
1524 rc
= HNS3_OUTER_VLAN_TAG
;
1526 rc
= HNS3_INNER_VLAN_TAG
;
1528 skb
->protocol
= vlan_get_protocol(skb
);
1532 rc
= skb_cow_head(skb
, 0);
1533 if (unlikely(rc
< 0))
1536 vhdr
= skb_vlan_eth_hdr(skb
);
1537 vhdr
->h_vlan_TCI
|= cpu_to_be16((skb
->priority
<< VLAN_PRIO_SHIFT
)
1540 skb
->protocol
= vlan_get_protocol(skb
);
1544 /* check if the hardware is capable of checksum offloading */
1545 static bool hns3_check_hw_tx_csum(struct sk_buff
*skb
)
1547 struct hns3_nic_priv
*priv
= netdev_priv(skb
->dev
);
1549 /* Kindly note, due to backward compatibility of the TX descriptor,
1550 * HW checksum of the non-IP packets and GSO packets is handled at
1551 * different place in the following code
1553 if (skb_csum_is_sctp(skb
) || skb_is_gso(skb
) ||
1554 !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE
, &priv
->state
))
1560 struct hns3_desc_param
{
1562 u32 ol_type_vlan_len_msec
;
1563 u32 type_cs_vlan_tso
;
1569 static void hns3_init_desc_data(struct sk_buff
*skb
, struct hns3_desc_param
*pa
)
1571 pa
->paylen_ol4cs
= skb
->len
;
1572 pa
->ol_type_vlan_len_msec
= 0;
1573 pa
->type_cs_vlan_tso
= 0;
1574 pa
->mss_hw_csum
= 0;
1579 static int hns3_handle_vlan_info(struct hns3_enet_ring
*ring
,
1580 struct sk_buff
*skb
,
1581 struct hns3_desc_param
*param
)
1585 ret
= hns3_handle_vtags(ring
, skb
);
1586 if (unlikely(ret
< 0)) {
1587 hns3_ring_stats_update(ring
, tx_vlan_err
);
1589 } else if (ret
== HNS3_INNER_VLAN_TAG
) {
1590 param
->inner_vtag
= skb_vlan_tag_get(skb
);
1591 param
->inner_vtag
|= (skb
->priority
<< VLAN_PRIO_SHIFT
) &
1593 hns3_set_field(param
->type_cs_vlan_tso
, HNS3_TXD_VLAN_B
, 1);
1594 } else if (ret
== HNS3_OUTER_VLAN_TAG
) {
1595 param
->out_vtag
= skb_vlan_tag_get(skb
);
1596 param
->out_vtag
|= (skb
->priority
<< VLAN_PRIO_SHIFT
) &
1598 hns3_set_field(param
->ol_type_vlan_len_msec
, HNS3_TXD_OVLAN_B
,
1604 static int hns3_handle_csum_partial(struct hns3_enet_ring
*ring
,
1605 struct sk_buff
*skb
,
1606 struct hns3_desc_cb
*desc_cb
,
1607 struct hns3_desc_param
*param
)
1609 u8 ol4_proto
, il4_proto
;
1612 if (hns3_check_hw_tx_csum(skb
)) {
1613 /* set checksum start and offset, defined in 2 Bytes */
1614 hns3_set_field(param
->type_cs_vlan_tso
, HNS3_TXD_CSUM_START_S
,
1615 skb_checksum_start_offset(skb
) >> 1);
1616 hns3_set_field(param
->ol_type_vlan_len_msec
,
1617 HNS3_TXD_CSUM_OFFSET_S
,
1618 skb
->csum_offset
>> 1);
1619 param
->mss_hw_csum
|= BIT(HNS3_TXD_HW_CS_B
);
1623 skb_reset_mac_len(skb
);
1625 ret
= hns3_get_l4_protocol(skb
, &ol4_proto
, &il4_proto
);
1626 if (unlikely(ret
< 0)) {
1627 hns3_ring_stats_update(ring
, tx_l4_proto_err
);
1631 ret
= hns3_set_l2l3l4(skb
, ol4_proto
, il4_proto
,
1632 ¶m
->type_cs_vlan_tso
,
1633 ¶m
->ol_type_vlan_len_msec
);
1634 if (unlikely(ret
< 0)) {
1635 hns3_ring_stats_update(ring
, tx_l2l3l4_err
);
1639 ret
= hns3_set_tso(skb
, ¶m
->paylen_ol4cs
, ¶m
->mss_hw_csum
,
1640 ¶m
->type_cs_vlan_tso
, &desc_cb
->send_bytes
);
1641 if (unlikely(ret
< 0)) {
1642 hns3_ring_stats_update(ring
, tx_tso_err
);
1648 static int hns3_fill_skb_desc(struct hns3_enet_ring
*ring
,
1649 struct sk_buff
*skb
, struct hns3_desc
*desc
,
1650 struct hns3_desc_cb
*desc_cb
)
1652 struct hns3_desc_param param
;
1655 hns3_init_desc_data(skb
, ¶m
);
1656 ret
= hns3_handle_vlan_info(ring
, skb
, ¶m
);
1657 if (unlikely(ret
< 0))
1660 desc_cb
->send_bytes
= skb
->len
;
1662 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
1663 ret
= hns3_handle_csum_partial(ring
, skb
, desc_cb
, ¶m
);
1669 desc
->tx
.ol_type_vlan_len_msec
=
1670 cpu_to_le32(param
.ol_type_vlan_len_msec
);
1671 desc
->tx
.type_cs_vlan_tso_len
= cpu_to_le32(param
.type_cs_vlan_tso
);
1672 desc
->tx
.paylen_ol4cs
= cpu_to_le32(param
.paylen_ol4cs
);
1673 desc
->tx
.mss_hw_csum
= cpu_to_le16(param
.mss_hw_csum
);
1674 desc
->tx
.vlan_tag
= cpu_to_le16(param
.inner_vtag
);
1675 desc
->tx
.outer_vlan_tag
= cpu_to_le16(param
.out_vtag
);
1680 static int hns3_fill_desc(struct hns3_enet_ring
*ring
, dma_addr_t dma
,
1683 #define HNS3_LIKELY_BD_NUM 1
1685 struct hns3_desc
*desc
= &ring
->desc
[ring
->next_to_use
];
1686 unsigned int frag_buf_num
;
1689 if (likely(size
<= HNS3_MAX_BD_SIZE
)) {
1690 desc
->addr
= cpu_to_le64(dma
);
1691 desc
->tx
.send_size
= cpu_to_le16(size
);
1692 desc
->tx
.bdtp_fe_sc_vld_ra_ri
=
1693 cpu_to_le16(BIT(HNS3_TXD_VLD_B
));
1695 trace_hns3_tx_desc(ring
, ring
->next_to_use
);
1696 ring_ptr_move_fw(ring
, next_to_use
);
1697 return HNS3_LIKELY_BD_NUM
;
1700 frag_buf_num
= hns3_tx_bd_count(size
);
1701 sizeoflast
= size
% HNS3_MAX_BD_SIZE
;
1702 sizeoflast
= sizeoflast
? sizeoflast
: HNS3_MAX_BD_SIZE
;
1704 /* When frag size is bigger than hardware limit, split this frag */
1705 for (k
= 0; k
< frag_buf_num
; k
++) {
1706 /* now, fill the descriptor */
1707 desc
->addr
= cpu_to_le64(dma
+ HNS3_MAX_BD_SIZE
* k
);
1708 desc
->tx
.send_size
= cpu_to_le16((k
== frag_buf_num
- 1) ?
1709 (u16
)sizeoflast
: (u16
)HNS3_MAX_BD_SIZE
);
1710 desc
->tx
.bdtp_fe_sc_vld_ra_ri
=
1711 cpu_to_le16(BIT(HNS3_TXD_VLD_B
));
1713 trace_hns3_tx_desc(ring
, ring
->next_to_use
);
1714 /* move ring pointer to next */
1715 ring_ptr_move_fw(ring
, next_to_use
);
1717 desc
= &ring
->desc
[ring
->next_to_use
];
1720 return frag_buf_num
;
1723 static int hns3_map_and_fill_desc(struct hns3_enet_ring
*ring
, void *priv
,
1726 struct hns3_desc_cb
*desc_cb
= &ring
->desc_cb
[ring
->next_to_use
];
1727 struct device
*dev
= ring_to_dev(ring
);
1731 if (type
& (DESC_TYPE_FRAGLIST_SKB
| DESC_TYPE_SKB
)) {
1732 struct sk_buff
*skb
= (struct sk_buff
*)priv
;
1734 size
= skb_headlen(skb
);
1738 dma
= dma_map_single(dev
, skb
->data
, size
, DMA_TO_DEVICE
);
1739 } else if (type
& DESC_TYPE_BOUNCE_HEAD
) {
1740 /* Head data has been filled in hns3_handle_tx_bounce(),
1741 * just return 0 here.
1745 skb_frag_t
*frag
= (skb_frag_t
*)priv
;
1747 size
= skb_frag_size(frag
);
1751 dma
= skb_frag_dma_map(dev
, frag
, 0, size
, DMA_TO_DEVICE
);
1754 if (unlikely(dma_mapping_error(dev
, dma
))) {
1755 hns3_ring_stats_update(ring
, sw_err_cnt
);
1759 desc_cb
->priv
= priv
;
1760 desc_cb
->length
= size
;
1762 desc_cb
->type
= type
;
1764 return hns3_fill_desc(ring
, dma
, size
);
1767 static unsigned int hns3_skb_bd_num(struct sk_buff
*skb
, unsigned int *bd_size
,
1768 unsigned int bd_num
)
1773 size
= skb_headlen(skb
);
1774 while (size
> HNS3_MAX_BD_SIZE
) {
1775 bd_size
[bd_num
++] = HNS3_MAX_BD_SIZE
;
1776 size
-= HNS3_MAX_BD_SIZE
;
1778 if (bd_num
> HNS3_MAX_TSO_BD_NUM
)
1783 bd_size
[bd_num
++] = size
;
1784 if (bd_num
> HNS3_MAX_TSO_BD_NUM
)
1788 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
1789 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
1790 size
= skb_frag_size(frag
);
1794 while (size
> HNS3_MAX_BD_SIZE
) {
1795 bd_size
[bd_num
++] = HNS3_MAX_BD_SIZE
;
1796 size
-= HNS3_MAX_BD_SIZE
;
1798 if (bd_num
> HNS3_MAX_TSO_BD_NUM
)
1802 bd_size
[bd_num
++] = size
;
1803 if (bd_num
> HNS3_MAX_TSO_BD_NUM
)
1810 static unsigned int hns3_tx_bd_num(struct sk_buff
*skb
, unsigned int *bd_size
,
1811 u8 max_non_tso_bd_num
, unsigned int bd_num
,
1812 unsigned int recursion_level
)
1814 #define HNS3_MAX_RECURSION_LEVEL 24
1816 struct sk_buff
*frag_skb
;
1818 /* If the total len is within the max bd limit */
1819 if (likely(skb
->len
<= HNS3_MAX_BD_SIZE
&& !recursion_level
&&
1820 !skb_has_frag_list(skb
) &&
1821 skb_shinfo(skb
)->nr_frags
< max_non_tso_bd_num
))
1822 return skb_shinfo(skb
)->nr_frags
+ 1U;
1824 if (unlikely(recursion_level
>= HNS3_MAX_RECURSION_LEVEL
))
1827 bd_num
= hns3_skb_bd_num(skb
, bd_size
, bd_num
);
1828 if (!skb_has_frag_list(skb
) || bd_num
> HNS3_MAX_TSO_BD_NUM
)
1831 skb_walk_frags(skb
, frag_skb
) {
1832 bd_num
= hns3_tx_bd_num(frag_skb
, bd_size
, max_non_tso_bd_num
,
1833 bd_num
, recursion_level
+ 1);
1834 if (bd_num
> HNS3_MAX_TSO_BD_NUM
)
1841 static unsigned int hns3_gso_hdr_len(struct sk_buff
*skb
)
1843 if (!skb
->encapsulation
)
1844 return skb_tcp_all_headers(skb
);
1846 return skb_inner_tcp_all_headers(skb
);
/* HW needs every continuous max_non_tso_bd_num buffers of data to be
 * larger than MSS. We simplify it by ensuring that skb_headlen + the first
 * continuous max_non_tso_bd_num - 1 frags are larger than GSO header len +
 * MSS, and that every other continuous max_non_tso_bd_num - 1 frags are
 * larger than MSS except the last max_non_tso_bd_num - 1 frags.
 */
static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
				     unsigned int bd_num, u8 max_non_tso_bd_num)
{
	unsigned int tot_len = 0;
	int i;

	for (i = 0; i < max_non_tso_bd_num - 1U; i++)
		tot_len += bd_size[i];

	/* ensure the first max_non_tso_bd_num frags are greater than
	 * mss + header
	 */
	if (tot_len + bd_size[max_non_tso_bd_num - 1U] <
	    skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb))
		return true;

	/* ensure every continuous max_non_tso_bd_num - 1 buffer is greater
	 * than mss except the last one.
	 */
	for (i = 0; i < bd_num - max_non_tso_bd_num; i++) {
		tot_len -= bd_size[i];
		tot_len += bd_size[i + max_non_tso_bd_num - 1U];

		if (tot_len < skb_shinfo(skb)->gso_size)
			return true;
	}

	return false;
}
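/* Worked example: with max_non_tso_bd_num = 8, gso_size = 1448 and
 * hns3_gso_hdr_len() = 66, the first check requires the first 8 buffers to
 * carry at least 1514 bytes.  The second loop then slides a window of
 * max_non_tso_bd_num - 1 = 7 buffers (dropping bd_size[i], adding
 * bd_size[i + 7]); if any such window sums to less than 1448 bytes the skb
 * is linearized.
 */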
1885 void hns3_shinfo_pack(struct skb_shared_info
*shinfo
, __u32
*size
)
1889 for (i
= 0; i
< MAX_SKB_FRAGS
; i
++)
1890 size
[i
] = skb_frag_size(&shinfo
->frags
[i
]);
1893 static int hns3_skb_linearize(struct hns3_enet_ring
*ring
,
1894 struct sk_buff
*skb
,
1895 unsigned int bd_num
)
1897 /* 'bd_num == UINT_MAX' means the skb' fraglist has a
1898 * recursion level of over HNS3_MAX_RECURSION_LEVEL.
1900 if (bd_num
== UINT_MAX
) {
1901 hns3_ring_stats_update(ring
, over_max_recursion
);
1905 /* The skb->len has exceeded the hw limitation, linearization
1908 if (skb
->len
> HNS3_MAX_TSO_SIZE
||
1909 (!skb_is_gso(skb
) && skb
->len
> HNS3_MAX_NON_TSO_SIZE
)) {
1910 hns3_ring_stats_update(ring
, hw_limitation
);
1914 if (__skb_linearize(skb
)) {
1915 hns3_ring_stats_update(ring
, sw_err_cnt
);
static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
				  struct net_device *netdev,
				  struct sk_buff *skb)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	u8 max_non_tso_bd_num = priv->max_non_tso_bd_num;
	unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
	unsigned int bd_num;

	bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0);
	if (unlikely(bd_num > max_non_tso_bd_num)) {
		if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
		    !hns3_skb_need_linearized(skb, bd_size, bd_num,
					      max_non_tso_bd_num)) {
			trace_hns3_over_max_bd(skb);
			goto out;
		}

		if (hns3_skb_linearize(ring, skb, bd_num))
			return -ENOMEM;

		bd_num = hns3_tx_bd_count(skb->len);

		hns3_ring_stats_update(ring, tx_copy);
	}

out:
	if (likely(ring_space(ring) >= bd_num))
		return bd_num;

	netif_stop_subqueue(netdev, ring->queue_index);
	smp_mb(); /* Memory barrier before checking ring_space */

	/* Start queue in case hns3_clean_tx_ring has just made room
	 * available and has not seen the queue stopped state performed
	 * by netif_stop_subqueue above.
	 */
	if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) &&
	    !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
		netif_start_subqueue(netdev, ring->queue_index);
		return bd_num;
	}

	hns3_ring_stats_update(ring, tx_busy);

	return -EBUSY;
}
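/* Note: the smp_mb() above is expected to pair with the descriptor
 * reclaiming done on the TX completion path, so that either this xmit
 * thread sees the newly freed ring space or the cleaning side sees the
 * stopped queue and restarts it; without the barrier both sides could miss
 * each other and the queue could stay stopped.
 */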
1970 static void hns3_clear_desc(struct hns3_enet_ring
*ring
, int next_to_use_orig
)
1972 struct device
*dev
= ring_to_dev(ring
);
1975 for (i
= 0; i
< ring
->desc_num
; i
++) {
1976 struct hns3_desc
*desc
= &ring
->desc
[ring
->next_to_use
];
1977 struct hns3_desc_cb
*desc_cb
;
1979 memset(desc
, 0, sizeof(*desc
));
1981 /* check if this is where we started */
1982 if (ring
->next_to_use
== next_to_use_orig
)
1986 ring_ptr_move_bw(ring
, next_to_use
);
1988 desc_cb
= &ring
->desc_cb
[ring
->next_to_use
];
1993 /* unmap the descriptor dma address */
1994 if (desc_cb
->type
& (DESC_TYPE_SKB
| DESC_TYPE_FRAGLIST_SKB
))
1995 dma_unmap_single(dev
, desc_cb
->dma
, desc_cb
->length
,
1997 else if (desc_cb
->type
&
1998 (DESC_TYPE_BOUNCE_HEAD
| DESC_TYPE_BOUNCE_ALL
))
1999 hns3_tx_spare_rollback(ring
, desc_cb
->length
);
2000 else if (desc_cb
->length
)
2001 dma_unmap_page(dev
, desc_cb
->dma
, desc_cb
->length
,
2004 desc_cb
->length
= 0;
2006 desc_cb
->type
= DESC_TYPE_UNKNOWN
;
static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
				 struct sk_buff *skb, unsigned int type)
{
	struct sk_buff *frag_skb;
	int i, ret, bd_num = 0;

	ret = hns3_map_and_fill_desc(ring, skb, type);
	if (unlikely(ret < 0))
		return ret;

	bd_num += ret;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ret = hns3_map_and_fill_desc(ring, frag, DESC_TYPE_PAGE);
		if (unlikely(ret < 0))
			return ret;

		bd_num += ret;
	}

	skb_walk_frags(skb, frag_skb) {
		ret = hns3_fill_skb_to_desc(ring, frag_skb,
					    DESC_TYPE_FRAGLIST_SKB);
		if (unlikely(ret < 0))
			return ret;

		bd_num += ret;
	}

	return bd_num;
}
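
/* Copy the last @num descriptors directly into the device's push memory
 * window with __iowrite64_copy() instead of ringing the tail doorbell;
 * used only for packets needing at most HNS3_MAX_PUSH_BD_NUM BDs.
 */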
static void hns3_tx_push_bd(struct hns3_enet_ring *ring, int num)
{
#define HNS3_BYTES_PER_64BIT		8

	struct hns3_desc desc[HNS3_MAX_PUSH_BD_NUM] = {};
	int offset = 0;

	/* make sure everything is visible to device before
	 * executing tx push or updating doorbell
	 */
	dma_wmb();

	do {
		int idx = (ring->next_to_use - num + ring->desc_num) %
			  ring->desc_num;

		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_push++;
		u64_stats_update_end(&ring->syncp);
		memcpy(&desc[offset], &ring->desc[idx],
		       sizeof(struct hns3_desc));
		offset++;
	} while (--num);

	__iowrite64_copy(ring->tqp->mem_base, desc,
			 (sizeof(struct hns3_desc) * HNS3_MAX_PUSH_BD_NUM) /
			 HNS3_BYTES_PER_64BIT);
}
static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring)
{
#define HNS3_MEM_DOORBELL_OFFSET	64

	__le64 bd_num = cpu_to_le64((u64)ring->pending_buf);

	/* make sure everything is visible to device before
	 * executing tx push or updating doorbell
	 */
	dma_wmb();

	__iowrite64_copy(ring->tqp->mem_base + HNS3_MEM_DOORBELL_OFFSET,
			 &bd_num, 1);
	u64_stats_update_begin(&ring->syncp);
	ring->stats.tx_mem_doorbell += ring->pending_buf;
	u64_stats_update_end(&ring->syncp);
}
static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
			     bool doorbell)
{
	struct net_device *netdev = ring_to_netdev(ring);
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	/* when tx push is enabled, the packet whose number of BD below
	 * HNS3_MAX_PUSH_BD_NUM can be pushed directly.
	 */
	if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
	    !ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
		/* This smp_store_release() pairs with smp_load_acquire() in
		 * hns3_nic_reclaim_desc(). Ensure that the BD valid bit
		 * is updated.
		 */
		smp_store_release(&ring->last_to_use, ring->next_to_use);
		hns3_tx_push_bd(ring, num);
		return;
	}

	ring->pending_buf += num;

	if (!doorbell) {
		hns3_ring_stats_update(ring, tx_more);
		return;
	}

	if (!ring->pending_buf)
		return;

	/* This smp_store_release() pairs with smp_load_acquire() in
	 * hns3_nic_reclaim_desc(). Ensure that the BD valid bit is updated.
	 */
	smp_store_release(&ring->last_to_use, ring->next_to_use);

	if (ring->tqp->mem_base)
		hns3_tx_mem_doorbell(ring);
	else
		writel(ring->pending_buf,
		       ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);

	ring->pending_buf = 0;
}
static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb,
		      struct hns3_desc *desc)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!(h->ae_algo->ops->set_tx_hwts_info &&
	      h->ae_algo->ops->set_tx_hwts_info(h, skb)))
		return;

	desc->tx.bdtp_fe_sc_vld_ra_ri |= cpu_to_le16(BIT(HNS3_TXD_TSYN_B));
}
static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
				 struct sk_buff *skb)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	unsigned int type = DESC_TYPE_BOUNCE_HEAD;
	unsigned int size = skb_headlen(skb);
	dma_addr_t dma;
	int bd_num = 0;
	u32 cb_len;
	void *buf;
	int ret;

	if (skb->len <= ring->tx_copybreak) {
		size = skb->len;
		type = DESC_TYPE_BOUNCE_ALL;
	}

	/* hns3_can_use_tx_bounce() is called to ensure the below
	 * function can always return the tx buffer.
	 */
	buf = hns3_tx_spare_alloc(ring, size, &dma, &cb_len);

	ret = skb_copy_bits(skb, 0, buf, size);
	if (unlikely(ret < 0)) {
		hns3_tx_spare_rollback(ring, cb_len);
		hns3_ring_stats_update(ring, copy_bits_err);
		return ret;
	}

	desc_cb->priv = skb;
	desc_cb->length = cb_len;
	desc_cb->dma = dma;
	desc_cb->type = type;

	bd_num += hns3_fill_desc(ring, dma, size);

	if (type == DESC_TYPE_BOUNCE_HEAD) {
		ret = hns3_fill_skb_to_desc(ring, skb,
					    DESC_TYPE_BOUNCE_HEAD);
		if (unlikely(ret < 0))
			return ret;

		bd_num += ret;
	}

	dma_sync_single_for_device(ring_to_dev(ring), dma, size,
				   DMA_TO_DEVICE);

	hns3_ring_stats_update(ring, tx_bounce);

	return bd_num;
}
static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
			      struct sk_buff *skb)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	u32 nfrag = skb_shinfo(skb)->nr_frags + 1;
	struct sg_table *sgt;
	int i, bd_num = 0;
	dma_addr_t dma;
	u32 cb_len;
	int nents;

	if (skb_has_frag_list(skb))
		nfrag = HNS3_MAX_TSO_BD_NUM;

	/* hns3_can_use_tx_sgl() is called to ensure the below
	 * function can always return the tx buffer.
	 */
	sgt = hns3_tx_spare_alloc(ring, HNS3_SGL_SIZE(nfrag),
				  &dma, &cb_len);

	/* the scatterlist follows the sg table */
	sgt->sgl = (struct scatterlist *)(sgt + 1);
	sg_init_table(sgt->sgl, nfrag);
	nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
	if (unlikely(nents < 0)) {
		hns3_tx_spare_rollback(ring, cb_len);
		hns3_ring_stats_update(ring, skb2sgl_err);
		return -ENOMEM;
	}

	sgt->orig_nents = nents;
	sgt->nents = dma_map_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents,
				DMA_TO_DEVICE);
	if (unlikely(!sgt->nents)) {
		hns3_tx_spare_rollback(ring, cb_len);
		hns3_ring_stats_update(ring, map_sg_err);
		return -ENOMEM;
	}

	desc_cb->priv = skb;
	desc_cb->length = cb_len;
	desc_cb->dma = dma;
	desc_cb->type = DESC_TYPE_SGL_SKB;

	for (i = 0; i < sgt->nents; i++)
		bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i),
					 sg_dma_len(sgt->sgl + i));
	hns3_ring_stats_update(ring, tx_sgl);

	return bd_num;
}
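
/* Pick the tx filling strategy: prefer the scatter-gather path, then the
 * bounce-buffer path when the tx spare buffer has room, otherwise fall
 * back to mapping the skb directly.
 */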
static int hns3_handle_desc_filling(struct hns3_enet_ring *ring,
				    struct sk_buff *skb)
{
	u32 space;

	if (!ring->tx_spare)
		goto out;

	space = hns3_tx_spare_space(ring);

	if (hns3_can_use_tx_sgl(ring, skb, space))
		return hns3_handle_tx_sgl(ring, skb);

	if (hns3_can_use_tx_bounce(ring, skb, space))
		return hns3_handle_tx_bounce(ring, skb);

out:
	return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
}
static int hns3_handle_skb_desc(struct hns3_enet_ring *ring,
				struct sk_buff *skb,
				struct hns3_desc_cb *desc_cb,
				int next_to_use_head)
{
	int ret;

	ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use],
				 desc_cb);
	if (unlikely(ret < 0))
		goto fill_err;

	/* 'ret < 0' means filling error, 'ret == 0' means skb->len is
	 * zero, which is unlikely, and 'ret > 0' means how many tx desc
	 * need to be notified to the hw.
	 */
	ret = hns3_handle_desc_filling(ring, skb);
	if (likely(ret > 0))
		return ret;

fill_err:
	hns3_clear_desc(ring, next_to_use_head);
	return ret;
}
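
/* Main transmit entry point: pad short frames, reserve ring space, fill
 * the descriptors, set the FE bit on the last BD and ring the doorbell.
 */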
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct netdev_queue *dev_queue;
	int pre_ntu, ret;
	bool doorbell;

	/* Hardware can only handle short frames above 32 bytes */
	if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
		hns3_tx_doorbell(ring, 0, !netdev_xmit_more());

		hns3_ring_stats_update(ring, sw_err_cnt);

		return NETDEV_TX_OK;
	}

	/* Prefetch the data used later */
	prefetch(skb->data);

	ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
	if (unlikely(ret <= 0)) {
		if (ret == -EBUSY) {
			hns3_tx_doorbell(ring, 0, true);
			return NETDEV_TX_BUSY;
		}

		hns3_rl_err(netdev, "xmit error: %d!\n", ret);
		goto out_err_tx_ok;
	}

	ret = hns3_handle_skb_desc(ring, skb, desc_cb, ring->next_to_use);
	if (unlikely(ret <= 0))
		goto out_err_tx_ok;

	pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
					(ring->desc_num - 1);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		hns3_tsyn(netdev, skb, &ring->desc[pre_ntu]);

	ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
				cpu_to_le16(BIT(HNS3_TXD_FE_B));
	trace_hns3_tx_desc(ring, pre_ntu);

	skb_tx_timestamp(skb);

	/* Complete translate all packets */
	dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
	doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes,
					  netdev_xmit_more());
	hns3_tx_doorbell(ring, ret, doorbell);

	return NETDEV_TX_OK;

out_err_tx_ok:
	dev_kfree_skb_any(skb);
	hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
	return NETDEV_TX_OK;
}
static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
{
	char format_mac_addr_perm[HNAE3_FORMAT_MAC_ADDR_LEN];
	char format_mac_addr_sa[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
		hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data);
		netdev_info(netdev, "already using mac address %s\n",
			    format_mac_addr_sa);
		return 0;
	}

	/* For VF device, if there is a perm_addr, then the user will not
	 * be allowed to change the address.
	 */
	if (!hns3_is_phys_func(h->pdev) &&
	    !is_zero_ether_addr(netdev->perm_addr)) {
		hnae3_format_mac_addr(format_mac_addr_perm, netdev->perm_addr);
		hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data);
		netdev_err(netdev, "has permanent MAC %s, user MAC %s not allowed\n",
			   format_mac_addr_perm, format_mac_addr_sa);
		return -EPERM;
	}

	ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
	if (ret) {
		netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	eth_hw_addr_set(netdev, mac_addr->sa_data);

	return 0;
}
static int hns3_nic_do_ioctl(struct net_device *netdev,
			     struct ifreq *ifr, int cmd)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!netif_running(netdev))
		return -EBUSY;

	if (!h->ae_algo->ops->do_ioctl)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
}
2415 static int hns3_nic_set_features(struct net_device
*netdev
,
2416 netdev_features_t features
)
2418 netdev_features_t changed
= netdev
->features
^ features
;
2419 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
2420 struct hnae3_handle
*h
= priv
->ae_handle
;
2424 if (changed
& (NETIF_F_GRO_HW
) && h
->ae_algo
->ops
->set_gro_en
) {
2425 enable
= !!(features
& NETIF_F_GRO_HW
);
2426 ret
= h
->ae_algo
->ops
->set_gro_en(h
, enable
);
2431 if ((changed
& NETIF_F_HW_VLAN_CTAG_RX
) &&
2432 h
->ae_algo
->ops
->enable_hw_strip_rxvtag
) {
2433 enable
= !!(features
& NETIF_F_HW_VLAN_CTAG_RX
);
2434 ret
= h
->ae_algo
->ops
->enable_hw_strip_rxvtag(h
, enable
);
2439 if ((changed
& NETIF_F_NTUPLE
) && h
->ae_algo
->ops
->enable_fd
) {
2440 enable
= !!(features
& NETIF_F_NTUPLE
);
2441 h
->ae_algo
->ops
->enable_fd(h
, enable
);
2444 if ((netdev
->features
& NETIF_F_HW_TC
) > (features
& NETIF_F_HW_TC
) &&
2445 h
->ae_algo
->ops
->cls_flower_active(h
)) {
2447 "there are offloaded TC filters active, cannot disable HW TC offload");
2451 if ((changed
& NETIF_F_HW_VLAN_CTAG_FILTER
) &&
2452 h
->ae_algo
->ops
->enable_vlan_filter
) {
2453 enable
= !!(features
& NETIF_F_HW_VLAN_CTAG_FILTER
);
2454 ret
= h
->ae_algo
->ops
->enable_vlan_filter(h
, enable
);
2459 netdev
->features
= features
;
static netdev_features_t hns3_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
#define HNS3_MAX_HDR_LEN	480U
#define HNS3_MAX_L4_HDR_LEN	60U

	size_t len;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	if (skb->encapsulation)
		len = skb_inner_transport_header(skb) - skb->data;
	else
		len = skb_transport_header(skb) - skb->data;

	/* Assume the L4 header is 60 bytes, as TCP is the only protocol
	 * with a flexible header length, and its maximum is 60 bytes.
	 */
	len += HNS3_MAX_L4_HDR_LEN;

	/* Hardware only supports checksum on the skb with a max header
	 * len of 480 bytes.
	 */
	if (len > HNS3_MAX_HDR_LEN)
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}
2494 static void hns3_fetch_stats(struct rtnl_link_stats64
*stats
,
2495 struct hns3_enet_ring
*ring
, bool is_tx
)
2500 start
= u64_stats_fetch_begin(&ring
->syncp
);
2502 stats
->tx_bytes
+= ring
->stats
.tx_bytes
;
2503 stats
->tx_packets
+= ring
->stats
.tx_pkts
;
2504 stats
->tx_dropped
+= ring
->stats
.sw_err_cnt
;
2505 stats
->tx_dropped
+= ring
->stats
.tx_vlan_err
;
2506 stats
->tx_dropped
+= ring
->stats
.tx_l4_proto_err
;
2507 stats
->tx_dropped
+= ring
->stats
.tx_l2l3l4_err
;
2508 stats
->tx_dropped
+= ring
->stats
.tx_tso_err
;
2509 stats
->tx_dropped
+= ring
->stats
.over_max_recursion
;
2510 stats
->tx_dropped
+= ring
->stats
.hw_limitation
;
2511 stats
->tx_dropped
+= ring
->stats
.copy_bits_err
;
2512 stats
->tx_dropped
+= ring
->stats
.skb2sgl_err
;
2513 stats
->tx_dropped
+= ring
->stats
.map_sg_err
;
2514 stats
->tx_errors
+= ring
->stats
.sw_err_cnt
;
2515 stats
->tx_errors
+= ring
->stats
.tx_vlan_err
;
2516 stats
->tx_errors
+= ring
->stats
.tx_l4_proto_err
;
2517 stats
->tx_errors
+= ring
->stats
.tx_l2l3l4_err
;
2518 stats
->tx_errors
+= ring
->stats
.tx_tso_err
;
2519 stats
->tx_errors
+= ring
->stats
.over_max_recursion
;
2520 stats
->tx_errors
+= ring
->stats
.hw_limitation
;
2521 stats
->tx_errors
+= ring
->stats
.copy_bits_err
;
2522 stats
->tx_errors
+= ring
->stats
.skb2sgl_err
;
2523 stats
->tx_errors
+= ring
->stats
.map_sg_err
;
2525 stats
->rx_bytes
+= ring
->stats
.rx_bytes
;
2526 stats
->rx_packets
+= ring
->stats
.rx_pkts
;
2527 stats
->rx_dropped
+= ring
->stats
.l2_err
;
2528 stats
->rx_errors
+= ring
->stats
.l2_err
;
2529 stats
->rx_errors
+= ring
->stats
.l3l4_csum_err
;
2530 stats
->rx_crc_errors
+= ring
->stats
.l2_err
;
2531 stats
->multicast
+= ring
->stats
.rx_multicast
;
2532 stats
->rx_length_errors
+= ring
->stats
.err_pkt_len
;
2534 } while (u64_stats_fetch_retry(&ring
->syncp
, start
));
2537 static void hns3_nic_get_stats64(struct net_device
*netdev
,
2538 struct rtnl_link_stats64
*stats
)
2540 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
2541 int queue_num
= priv
->ae_handle
->kinfo
.num_tqps
;
2542 struct hnae3_handle
*handle
= priv
->ae_handle
;
2543 struct rtnl_link_stats64 ring_total_stats
;
2544 struct hns3_enet_ring
*ring
;
2547 if (test_bit(HNS3_NIC_STATE_DOWN
, &priv
->state
))
2550 handle
->ae_algo
->ops
->update_stats(handle
);
2552 memset(&ring_total_stats
, 0, sizeof(ring_total_stats
));
2553 for (idx
= 0; idx
< queue_num
; idx
++) {
2554 /* fetch the tx stats */
2555 ring
= &priv
->ring
[idx
];
2556 hns3_fetch_stats(&ring_total_stats
, ring
, true);
2558 /* fetch the rx stats */
2559 ring
= &priv
->ring
[idx
+ queue_num
];
2560 hns3_fetch_stats(&ring_total_stats
, ring
, false);
2563 stats
->tx_bytes
= ring_total_stats
.tx_bytes
;
2564 stats
->tx_packets
= ring_total_stats
.tx_packets
;
2565 stats
->rx_bytes
= ring_total_stats
.rx_bytes
;
2566 stats
->rx_packets
= ring_total_stats
.rx_packets
;
2568 stats
->rx_errors
= ring_total_stats
.rx_errors
;
2569 stats
->multicast
= ring_total_stats
.multicast
;
2570 stats
->rx_length_errors
= ring_total_stats
.rx_length_errors
;
2571 stats
->rx_crc_errors
= ring_total_stats
.rx_crc_errors
;
2572 stats
->rx_missed_errors
= netdev
->stats
.rx_missed_errors
;
2574 stats
->tx_errors
= ring_total_stats
.tx_errors
;
2575 stats
->rx_dropped
= ring_total_stats
.rx_dropped
;
2576 stats
->tx_dropped
= ring_total_stats
.tx_dropped
;
2577 stats
->collisions
= netdev
->stats
.collisions
;
2578 stats
->rx_over_errors
= netdev
->stats
.rx_over_errors
;
2579 stats
->rx_frame_errors
= netdev
->stats
.rx_frame_errors
;
2580 stats
->rx_fifo_errors
= netdev
->stats
.rx_fifo_errors
;
2581 stats
->tx_aborted_errors
= netdev
->stats
.tx_aborted_errors
;
2582 stats
->tx_carrier_errors
= netdev
->stats
.tx_carrier_errors
;
2583 stats
->tx_fifo_errors
= netdev
->stats
.tx_fifo_errors
;
2584 stats
->tx_heartbeat_errors
= netdev
->stats
.tx_heartbeat_errors
;
2585 stats
->tx_window_errors
= netdev
->stats
.tx_window_errors
;
2586 stats
->rx_compressed
= netdev
->stats
.rx_compressed
;
2587 stats
->tx_compressed
= netdev
->stats
.tx_compressed
;
2590 static int hns3_setup_tc(struct net_device
*netdev
, void *type_data
)
2592 struct tc_mqprio_qopt_offload
*mqprio_qopt
= type_data
;
2593 struct hnae3_knic_private_info
*kinfo
;
2594 u8 tc
= mqprio_qopt
->qopt
.num_tc
;
2595 u16 mode
= mqprio_qopt
->mode
;
2596 u8 hw
= mqprio_qopt
->qopt
.hw
;
2597 struct hnae3_handle
*h
;
2599 if (!((hw
== TC_MQPRIO_HW_OFFLOAD_TCS
&&
2600 mode
== TC_MQPRIO_MODE_CHANNEL
) || (!hw
&& tc
== 0)))
2603 if (tc
> HNAE3_MAX_TC
)
2609 h
= hns3_get_handle(netdev
);
2612 netif_dbg(h
, drv
, netdev
, "setup tc: num_tc=%u\n", tc
);
2614 return (kinfo
->dcb_ops
&& kinfo
->dcb_ops
->setup_tc
) ?
2615 kinfo
->dcb_ops
->setup_tc(h
, mqprio_qopt
) : -EOPNOTSUPP
;
2618 static int hns3_setup_tc_cls_flower(struct hns3_nic_priv
*priv
,
2619 struct flow_cls_offload
*flow
)
2621 int tc
= tc_classid_to_hwtc(priv
->netdev
, flow
->classid
);
2622 struct hnae3_handle
*h
= hns3_get_handle(priv
->netdev
);
2624 switch (flow
->command
) {
2625 case FLOW_CLS_REPLACE
:
2626 if (h
->ae_algo
->ops
->add_cls_flower
)
2627 return h
->ae_algo
->ops
->add_cls_flower(h
, flow
, tc
);
2629 case FLOW_CLS_DESTROY
:
2630 if (h
->ae_algo
->ops
->del_cls_flower
)
2631 return h
->ae_algo
->ops
->del_cls_flower(h
, flow
);
2640 static int hns3_setup_tc_block_cb(enum tc_setup_type type
, void *type_data
,
2643 struct hns3_nic_priv
*priv
= cb_priv
;
2645 if (!tc_cls_can_offload_and_chain0(priv
->netdev
, type_data
))
2649 case TC_SETUP_CLSFLOWER
:
2650 return hns3_setup_tc_cls_flower(priv
, type_data
);
2656 static LIST_HEAD(hns3_block_cb_list
);
2658 static int hns3_nic_setup_tc(struct net_device
*dev
, enum tc_setup_type type
,
2661 struct hns3_nic_priv
*priv
= netdev_priv(dev
);
2665 case TC_SETUP_QDISC_MQPRIO
:
2666 ret
= hns3_setup_tc(dev
, type_data
);
2668 case TC_SETUP_BLOCK
:
2669 ret
= flow_block_cb_setup_simple(type_data
,
2670 &hns3_block_cb_list
,
2671 hns3_setup_tc_block_cb
,
2681 static int hns3_vlan_rx_add_vid(struct net_device
*netdev
,
2682 __be16 proto
, u16 vid
)
2684 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
2687 if (h
->ae_algo
->ops
->set_vlan_filter
)
2688 ret
= h
->ae_algo
->ops
->set_vlan_filter(h
, proto
, vid
, false);
2693 static int hns3_vlan_rx_kill_vid(struct net_device
*netdev
,
2694 __be16 proto
, u16 vid
)
2696 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
2699 if (h
->ae_algo
->ops
->set_vlan_filter
)
2700 ret
= h
->ae_algo
->ops
->set_vlan_filter(h
, proto
, vid
, true);
2705 static int hns3_ndo_set_vf_vlan(struct net_device
*netdev
, int vf
, u16 vlan
,
2706 u8 qos
, __be16 vlan_proto
)
2708 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
2711 netif_dbg(h
, drv
, netdev
,
2712 "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n",
2713 vf
, vlan
, qos
, ntohs(vlan_proto
));
2715 if (h
->ae_algo
->ops
->set_vf_vlan_filter
)
2716 ret
= h
->ae_algo
->ops
->set_vf_vlan_filter(h
, vf
, vlan
,
2722 static int hns3_set_vf_spoofchk(struct net_device
*netdev
, int vf
, bool enable
)
2724 struct hnae3_handle
*handle
= hns3_get_handle(netdev
);
2726 if (hns3_nic_resetting(netdev
))
2729 if (!handle
->ae_algo
->ops
->set_vf_spoofchk
)
2732 return handle
->ae_algo
->ops
->set_vf_spoofchk(handle
, vf
, enable
);
2735 static int hns3_set_vf_trust(struct net_device
*netdev
, int vf
, bool enable
)
2737 struct hnae3_handle
*handle
= hns3_get_handle(netdev
);
2739 if (!handle
->ae_algo
->ops
->set_vf_trust
)
2742 return handle
->ae_algo
->ops
->set_vf_trust(handle
, vf
, enable
);
2745 static int hns3_nic_change_mtu(struct net_device
*netdev
, int new_mtu
)
2747 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
2750 if (hns3_nic_resetting(netdev
))
2753 if (!h
->ae_algo
->ops
->set_mtu
)
2756 netif_dbg(h
, drv
, netdev
,
2757 "change mtu from %u to %d\n", netdev
->mtu
, new_mtu
);
2759 ret
= h
->ae_algo
->ops
->set_mtu(h
, new_mtu
);
2761 netdev_err(netdev
, "failed to change MTU in hardware %d\n",
2764 netdev
->mtu
= new_mtu
;
2769 static int hns3_get_timeout_queue(struct net_device
*ndev
)
2773 /* Find the stopped queue the same way the stack does */
2774 for (i
= 0; i
< ndev
->num_tx_queues
; i
++) {
2775 struct netdev_queue
*q
;
2776 unsigned long trans_start
;
2778 q
= netdev_get_tx_queue(ndev
, i
);
2779 trans_start
= READ_ONCE(q
->trans_start
);
2780 if (netif_xmit_stopped(q
) &&
2782 (trans_start
+ ndev
->watchdog_timeo
))) {
2784 struct dql
*dql
= &q
->dql
;
2786 netdev_info(ndev
, "DQL info last_cnt: %u, queued: %u, adj_limit: %u, completed: %u\n",
2787 dql
->last_obj_cnt
, dql
->num_queued
,
2788 dql
->adj_limit
, dql
->num_completed
);
2790 netdev_info(ndev
, "queue state: 0x%lx, delta msecs: %u\n",
2792 jiffies_to_msecs(jiffies
- trans_start
));
2800 static void hns3_dump_queue_stats(struct net_device
*ndev
,
2801 struct hns3_enet_ring
*tx_ring
,
2804 struct napi_struct
*napi
= &tx_ring
->tqp_vector
->napi
;
2805 struct hns3_nic_priv
*priv
= netdev_priv(ndev
);
2808 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
2809 priv
->tx_timeout_count
, timeout_queue
, tx_ring
->next_to_use
,
2810 tx_ring
->next_to_clean
, napi
->state
);
2813 "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n",
2814 tx_ring
->stats
.tx_pkts
, tx_ring
->stats
.tx_bytes
,
2815 tx_ring
->stats
.sw_err_cnt
, tx_ring
->pending_buf
);
2818 "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n",
2819 tx_ring
->stats
.seg_pkt_cnt
, tx_ring
->stats
.tx_more
,
2820 tx_ring
->stats
.restart_queue
, tx_ring
->stats
.tx_busy
);
2822 netdev_info(ndev
, "tx_push: %llu, tx_mem_doorbell: %llu\n",
2823 tx_ring
->stats
.tx_push
, tx_ring
->stats
.tx_mem_doorbell
);
2826 static void hns3_dump_queue_reg(struct net_device
*ndev
,
2827 struct hns3_enet_ring
*tx_ring
)
2830 "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
2831 hns3_tqp_read_reg(tx_ring
, HNS3_RING_TX_RING_BD_NUM_REG
),
2832 hns3_tqp_read_reg(tx_ring
, HNS3_RING_TX_RING_HEAD_REG
),
2833 hns3_tqp_read_reg(tx_ring
, HNS3_RING_TX_RING_TAIL_REG
),
2834 hns3_tqp_read_reg(tx_ring
, HNS3_RING_TX_RING_BD_ERR_REG
),
2835 readl(tx_ring
->tqp_vector
->mask_addr
));
2837 "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
2838 hns3_tqp_read_reg(tx_ring
, HNS3_RING_EN_REG
),
2839 hns3_tqp_read_reg(tx_ring
, HNS3_RING_TX_RING_TC_REG
),
2840 hns3_tqp_read_reg(tx_ring
, HNS3_RING_TX_RING_FBDNUM_REG
),
2841 hns3_tqp_read_reg(tx_ring
, HNS3_RING_TX_RING_OFFSET_REG
),
2842 hns3_tqp_read_reg(tx_ring
, HNS3_RING_TX_RING_EBDNUM_REG
),
2843 hns3_tqp_read_reg(tx_ring
,
2844 HNS3_RING_TX_RING_EBD_OFFSET_REG
));
2847 static bool hns3_get_tx_timeo_queue_info(struct net_device
*ndev
)
2849 struct hns3_nic_priv
*priv
= netdev_priv(ndev
);
2850 struct hnae3_handle
*h
= hns3_get_handle(ndev
);
2851 struct hns3_enet_ring
*tx_ring
;
2854 timeout_queue
= hns3_get_timeout_queue(ndev
);
2855 if (timeout_queue
>= ndev
->num_tx_queues
) {
2857 "no netdev TX timeout queue found, timeout count: %llu\n",
2858 priv
->tx_timeout_count
);
2862 priv
->tx_timeout_count
++;
2864 tx_ring
= &priv
->ring
[timeout_queue
];
2865 hns3_dump_queue_stats(ndev
, tx_ring
, timeout_queue
);
2867 /* When mac received many pause frames continuous, it's unable to send
2868 * packets, which may cause tx timeout
2870 if (h
->ae_algo
->ops
->get_mac_stats
) {
2871 struct hns3_mac_stats mac_stats
;
2873 h
->ae_algo
->ops
->get_mac_stats(h
, &mac_stats
);
2874 netdev_info(ndev
, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
2875 mac_stats
.tx_pause_cnt
, mac_stats
.rx_pause_cnt
);
2878 hns3_dump_queue_reg(ndev
, tx_ring
);
2883 static void hns3_nic_net_timeout(struct net_device
*ndev
, unsigned int txqueue
)
2885 struct hns3_nic_priv
*priv
= netdev_priv(ndev
);
2886 struct hnae3_handle
*h
= priv
->ae_handle
;
2888 if (!hns3_get_tx_timeo_queue_info(ndev
))
2891 /* request the reset, and let the hclge to determine
2892 * which reset level should be done
2894 if (h
->ae_algo
->ops
->reset_event
)
2895 h
->ae_algo
->ops
->reset_event(h
->pdev
, h
);
2898 #ifdef CONFIG_RFS_ACCEL
2899 static int hns3_rx_flow_steer(struct net_device
*dev
, const struct sk_buff
*skb
,
2900 u16 rxq_index
, u32 flow_id
)
2902 struct hnae3_handle
*h
= hns3_get_handle(dev
);
2903 struct flow_keys fkeys
;
2905 if (!h
->ae_algo
->ops
->add_arfs_entry
)
2908 if (skb
->encapsulation
)
2909 return -EPROTONOSUPPORT
;
2911 if (!skb_flow_dissect_flow_keys(skb
, &fkeys
, 0))
2912 return -EPROTONOSUPPORT
;
2914 if ((fkeys
.basic
.n_proto
!= htons(ETH_P_IP
) &&
2915 fkeys
.basic
.n_proto
!= htons(ETH_P_IPV6
)) ||
2916 (fkeys
.basic
.ip_proto
!= IPPROTO_TCP
&&
2917 fkeys
.basic
.ip_proto
!= IPPROTO_UDP
))
2918 return -EPROTONOSUPPORT
;
2920 return h
->ae_algo
->ops
->add_arfs_entry(h
, rxq_index
, flow_id
, &fkeys
);
2924 static int hns3_nic_get_vf_config(struct net_device
*ndev
, int vf
,
2925 struct ifla_vf_info
*ivf
)
2927 struct hnae3_handle
*h
= hns3_get_handle(ndev
);
2929 if (!h
->ae_algo
->ops
->get_vf_config
)
2932 return h
->ae_algo
->ops
->get_vf_config(h
, vf
, ivf
);
2935 static int hns3_nic_set_vf_link_state(struct net_device
*ndev
, int vf
,
2938 struct hnae3_handle
*h
= hns3_get_handle(ndev
);
2940 if (!h
->ae_algo
->ops
->set_vf_link_state
)
2943 return h
->ae_algo
->ops
->set_vf_link_state(h
, vf
, link_state
);
2946 static int hns3_nic_set_vf_rate(struct net_device
*ndev
, int vf
,
2947 int min_tx_rate
, int max_tx_rate
)
2949 struct hnae3_handle
*h
= hns3_get_handle(ndev
);
2951 if (!h
->ae_algo
->ops
->set_vf_rate
)
2954 return h
->ae_algo
->ops
->set_vf_rate(h
, vf
, min_tx_rate
, max_tx_rate
,
2958 static int hns3_nic_set_vf_mac(struct net_device
*netdev
, int vf_id
, u8
*mac
)
2960 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
2961 char format_mac_addr
[HNAE3_FORMAT_MAC_ADDR_LEN
];
2963 if (!h
->ae_algo
->ops
->set_vf_mac
)
2966 if (is_multicast_ether_addr(mac
)) {
2967 hnae3_format_mac_addr(format_mac_addr
, mac
);
2969 "Invalid MAC:%s specified. Could not set MAC\n",
2974 return h
->ae_algo
->ops
->set_vf_mac(h
, vf_id
, mac
);
2977 #define HNS3_INVALID_DSCP 0xff
2978 #define HNS3_DSCP_SHIFT 2
2980 static u8
hns3_get_skb_dscp(struct sk_buff
*skb
)
2982 __be16 protocol
= skb
->protocol
;
2983 u8 dscp
= HNS3_INVALID_DSCP
;
2985 if (protocol
== htons(ETH_P_8021Q
))
2986 protocol
= vlan_get_protocol(skb
);
2988 if (protocol
== htons(ETH_P_IP
))
2989 dscp
= ipv4_get_dsfield(ip_hdr(skb
)) >> HNS3_DSCP_SHIFT
;
2990 else if (protocol
== htons(ETH_P_IPV6
))
2991 dscp
= ipv6_get_dsfield(ipv6_hdr(skb
)) >> HNS3_DSCP_SHIFT
;
2996 static u16
hns3_nic_select_queue(struct net_device
*netdev
,
2997 struct sk_buff
*skb
,
2998 struct net_device
*sb_dev
)
3000 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
3003 if (h
->kinfo
.tc_map_mode
!= HNAE3_TC_MAP_MODE_DSCP
||
3004 !h
->ae_algo
->ops
->get_dscp_prio
)
3007 dscp
= hns3_get_skb_dscp(skb
);
3008 if (unlikely(dscp
>= HNAE3_MAX_DSCP
))
3011 skb
->priority
= h
->kinfo
.dscp_prio
[dscp
];
3012 if (skb
->priority
== HNAE3_PRIO_ID_INVALID
)
3016 return netdev_pick_tx(netdev
, skb
, sb_dev
);
3019 static const struct net_device_ops hns3_nic_netdev_ops
= {
3020 .ndo_open
= hns3_nic_net_open
,
3021 .ndo_stop
= hns3_nic_net_stop
,
3022 .ndo_start_xmit
= hns3_nic_net_xmit
,
3023 .ndo_tx_timeout
= hns3_nic_net_timeout
,
3024 .ndo_set_mac_address
= hns3_nic_net_set_mac_address
,
3025 .ndo_eth_ioctl
= hns3_nic_do_ioctl
,
3026 .ndo_change_mtu
= hns3_nic_change_mtu
,
3027 .ndo_set_features
= hns3_nic_set_features
,
3028 .ndo_features_check
= hns3_features_check
,
3029 .ndo_get_stats64
= hns3_nic_get_stats64
,
3030 .ndo_setup_tc
= hns3_nic_setup_tc
,
3031 .ndo_set_rx_mode
= hns3_nic_set_rx_mode
,
3032 .ndo_vlan_rx_add_vid
= hns3_vlan_rx_add_vid
,
3033 .ndo_vlan_rx_kill_vid
= hns3_vlan_rx_kill_vid
,
3034 .ndo_set_vf_vlan
= hns3_ndo_set_vf_vlan
,
3035 .ndo_set_vf_spoofchk
= hns3_set_vf_spoofchk
,
3036 .ndo_set_vf_trust
= hns3_set_vf_trust
,
3037 #ifdef CONFIG_RFS_ACCEL
3038 .ndo_rx_flow_steer
= hns3_rx_flow_steer
,
3040 .ndo_get_vf_config
= hns3_nic_get_vf_config
,
3041 .ndo_set_vf_link_state
= hns3_nic_set_vf_link_state
,
3042 .ndo_set_vf_rate
= hns3_nic_set_vf_rate
,
3043 .ndo_set_vf_mac
= hns3_nic_set_vf_mac
,
3044 .ndo_select_queue
= hns3_nic_select_queue
,
3047 bool hns3_is_phys_func(struct pci_dev
*pdev
)
3049 u32 dev_id
= pdev
->device
;
3052 case HNAE3_DEV_ID_GE
:
3053 case HNAE3_DEV_ID_25GE
:
3054 case HNAE3_DEV_ID_25GE_RDMA
:
3055 case HNAE3_DEV_ID_25GE_RDMA_MACSEC
:
3056 case HNAE3_DEV_ID_50GE_RDMA
:
3057 case HNAE3_DEV_ID_50GE_RDMA_MACSEC
:
3058 case HNAE3_DEV_ID_100G_RDMA_MACSEC
:
3059 case HNAE3_DEV_ID_200G_RDMA
:
3061 case HNAE3_DEV_ID_VF
:
3062 case HNAE3_DEV_ID_RDMA_DCB_PFC_VF
:
3065 dev_warn(&pdev
->dev
, "un-recognized pci device-id %u",
3072 static void hns3_disable_sriov(struct pci_dev
*pdev
)
3074 /* If our VFs are assigned we cannot shut down SR-IOV
3075 * without causing issues, so just leave the hardware
3076 * available but disabled
3078 if (pci_vfs_assigned(pdev
)) {
3079 dev_warn(&pdev
->dev
,
3080 "disabling driver while VFs are assigned\n");
3084 pci_disable_sriov(pdev
);
3087 /* hns3_probe - Device initialization routine
3088 * @pdev: PCI device information struct
3089 * @ent: entry in hns3_pci_tbl
3091 * hns3_probe initializes a PF identified by a pci_dev structure.
3092 * The OS initialization, configuring of the PF private structure,
3093 * and a hardware reset occur.
3095 * Returns 0 on success, negative on failure
3097 static int hns3_probe(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
3099 struct hnae3_ae_dev
*ae_dev
;
3102 ae_dev
= devm_kzalloc(&pdev
->dev
, sizeof(*ae_dev
), GFP_KERNEL
);
3106 ae_dev
->pdev
= pdev
;
3107 ae_dev
->flag
= ent
->driver_data
;
3108 pci_set_drvdata(pdev
, ae_dev
);
3110 ret
= hnae3_register_ae_dev(ae_dev
);
3112 pci_set_drvdata(pdev
, NULL
);
3118 * hns3_clean_vf_config
3119 * @pdev: pointer to a pci_dev structure
3120 * @num_vfs: number of VFs allocated
3122 * Clean residual vf config after disable sriov
3124 static void hns3_clean_vf_config(struct pci_dev
*pdev
, int num_vfs
)
3126 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
3128 if (ae_dev
->ops
->clean_vf_config
)
3129 ae_dev
->ops
->clean_vf_config(ae_dev
, num_vfs
);
3132 /* hns3_remove - Device removal routine
3133 * @pdev: PCI device information struct
3135 static void hns3_remove(struct pci_dev
*pdev
)
3137 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
3139 if (hns3_is_phys_func(pdev
) && IS_ENABLED(CONFIG_PCI_IOV
))
3140 hns3_disable_sriov(pdev
);
3142 hnae3_unregister_ae_dev(ae_dev
);
3143 pci_set_drvdata(pdev
, NULL
);
3147 * hns3_pci_sriov_configure
3148 * @pdev: pointer to a pci_dev structure
3149 * @num_vfs: number of VFs to allocate
3151 * Enable or change the number of VFs. Called when the user updates the number
3154 static int hns3_pci_sriov_configure(struct pci_dev
*pdev
, int num_vfs
)
3158 if (!(hns3_is_phys_func(pdev
) && IS_ENABLED(CONFIG_PCI_IOV
))) {
3159 dev_warn(&pdev
->dev
, "Can not config SRIOV\n");
3164 ret
= pci_enable_sriov(pdev
, num_vfs
);
3166 dev_err(&pdev
->dev
, "SRIOV enable failed %d\n", ret
);
3169 } else if (!pci_vfs_assigned(pdev
)) {
3170 int num_vfs_pre
= pci_num_vf(pdev
);
3172 pci_disable_sriov(pdev
);
3173 hns3_clean_vf_config(pdev
, num_vfs_pre
);
3175 dev_warn(&pdev
->dev
,
3176 "Unable to free VFs because some are assigned to VMs.\n");
3182 static void hns3_shutdown(struct pci_dev
*pdev
)
3184 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
3186 hnae3_unregister_ae_dev(ae_dev
);
3187 pci_set_drvdata(pdev
, NULL
);
3189 if (system_state
== SYSTEM_POWER_OFF
)
3190 pci_set_power_state(pdev
, PCI_D3hot
);
3193 static int __maybe_unused
hns3_suspend(struct device
*dev
)
3195 struct hnae3_ae_dev
*ae_dev
= dev_get_drvdata(dev
);
3197 if (ae_dev
&& hns3_is_phys_func(ae_dev
->pdev
)) {
3198 dev_info(dev
, "Begin to suspend.\n");
3199 if (ae_dev
->ops
&& ae_dev
->ops
->reset_prepare
)
3200 ae_dev
->ops
->reset_prepare(ae_dev
, HNAE3_FUNC_RESET
);
3206 static int __maybe_unused
hns3_resume(struct device
*dev
)
3208 struct hnae3_ae_dev
*ae_dev
= dev_get_drvdata(dev
);
3210 if (ae_dev
&& hns3_is_phys_func(ae_dev
->pdev
)) {
3211 dev_info(dev
, "Begin to resume.\n");
3212 if (ae_dev
->ops
&& ae_dev
->ops
->reset_done
)
3213 ae_dev
->ops
->reset_done(ae_dev
);
3219 static pci_ers_result_t
hns3_error_detected(struct pci_dev
*pdev
,
3220 pci_channel_state_t state
)
3222 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
3223 pci_ers_result_t ret
;
3225 dev_info(&pdev
->dev
, "PCI error detected, state(=%u)!!\n", state
);
3227 if (state
== pci_channel_io_perm_failure
)
3228 return PCI_ERS_RESULT_DISCONNECT
;
3230 if (!ae_dev
|| !ae_dev
->ops
) {
3232 "Can't recover - error happened before device initialized\n");
3233 return PCI_ERS_RESULT_NONE
;
3236 if (ae_dev
->ops
->handle_hw_ras_error
)
3237 ret
= ae_dev
->ops
->handle_hw_ras_error(ae_dev
);
3239 return PCI_ERS_RESULT_NONE
;
3244 static pci_ers_result_t
hns3_slot_reset(struct pci_dev
*pdev
)
3246 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
3247 const struct hnae3_ae_ops
*ops
;
3248 enum hnae3_reset_type reset_type
;
3249 struct device
*dev
= &pdev
->dev
;
3251 if (!ae_dev
|| !ae_dev
->ops
)
3252 return PCI_ERS_RESULT_NONE
;
3255 /* request the reset */
3256 if (ops
->reset_event
&& ops
->get_reset_level
&&
3257 ops
->set_default_reset_request
) {
3258 if (ae_dev
->hw_err_reset_req
) {
3259 reset_type
= ops
->get_reset_level(ae_dev
,
3260 &ae_dev
->hw_err_reset_req
);
3261 ops
->set_default_reset_request(ae_dev
, reset_type
);
3262 dev_info(dev
, "requesting reset due to PCI error\n");
3263 ops
->reset_event(pdev
, NULL
);
3266 return PCI_ERS_RESULT_RECOVERED
;
3269 return PCI_ERS_RESULT_DISCONNECT
;
3272 static void hns3_reset_prepare(struct pci_dev
*pdev
)
3274 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
3276 dev_info(&pdev
->dev
, "FLR prepare\n");
3277 if (ae_dev
&& ae_dev
->ops
&& ae_dev
->ops
->reset_prepare
)
3278 ae_dev
->ops
->reset_prepare(ae_dev
, HNAE3_FLR_RESET
);
3281 static void hns3_reset_done(struct pci_dev
*pdev
)
3283 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
3285 dev_info(&pdev
->dev
, "FLR done\n");
3286 if (ae_dev
&& ae_dev
->ops
&& ae_dev
->ops
->reset_done
)
3287 ae_dev
->ops
->reset_done(ae_dev
);
3290 static const struct pci_error_handlers hns3_err_handler
= {
3291 .error_detected
= hns3_error_detected
,
3292 .slot_reset
= hns3_slot_reset
,
3293 .reset_prepare
= hns3_reset_prepare
,
3294 .reset_done
= hns3_reset_done
,
3297 static SIMPLE_DEV_PM_OPS(hns3_pm_ops
, hns3_suspend
, hns3_resume
);
3299 static struct pci_driver hns3_driver
= {
3300 .name
= hns3_driver_name
,
3301 .id_table
= hns3_pci_tbl
,
3302 .probe
= hns3_probe
,
3303 .remove
= hns3_remove
,
3304 .shutdown
= hns3_shutdown
,
3305 .driver
.pm
= &hns3_pm_ops
,
3306 .sriov_configure
= hns3_pci_sriov_configure
,
3307 .err_handler
= &hns3_err_handler
,
3310 /* set default feature to hns3 */
3311 static void hns3_set_default_feature(struct net_device
*netdev
)
3313 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
3314 struct pci_dev
*pdev
= h
->pdev
;
3315 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
3317 netdev
->priv_flags
|= IFF_UNICAST_FLT
;
3319 netdev
->features
|= NETIF_F_HW_VLAN_CTAG_FILTER
|
3320 NETIF_F_HW_VLAN_CTAG_TX
| NETIF_F_HW_VLAN_CTAG_RX
|
3321 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
3322 NETIF_F_GRO
| NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
3323 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
3324 NETIF_F_SCTP_CRC
| NETIF_F_FRAGLIST
;
3326 if (hnae3_ae_dev_gro_supported(ae_dev
))
3327 netdev
->features
|= NETIF_F_GRO_HW
;
3329 if (hnae3_ae_dev_fd_supported(ae_dev
))
3330 netdev
->features
|= NETIF_F_NTUPLE
;
3332 if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B
, ae_dev
->caps
))
3333 netdev
->features
|= NETIF_F_GSO_UDP_L4
;
3335 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B
, ae_dev
->caps
))
3336 netdev
->features
|= NETIF_F_HW_CSUM
;
3338 netdev
->features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
;
3340 if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B
, ae_dev
->caps
))
3341 netdev
->features
|= NETIF_F_GSO_UDP_TUNNEL_CSUM
;
3343 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B
, ae_dev
->caps
))
3344 netdev
->features
|= NETIF_F_HW_TC
;
3346 netdev
->hw_features
|= netdev
->features
;
3347 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B
, ae_dev
->caps
))
3348 netdev
->hw_features
&= ~NETIF_F_HW_VLAN_CTAG_FILTER
;
3350 netdev
->vlan_features
|= netdev
->features
&
3351 ~(NETIF_F_HW_VLAN_CTAG_FILTER
| NETIF_F_HW_VLAN_CTAG_TX
|
3352 NETIF_F_HW_VLAN_CTAG_RX
| NETIF_F_GRO_HW
| NETIF_F_NTUPLE
|
3355 netdev
->hw_enc_features
|= netdev
->vlan_features
| NETIF_F_TSO_MANGLEID
;
3358 static int hns3_alloc_buffer(struct hns3_enet_ring
*ring
,
3359 struct hns3_desc_cb
*cb
)
3361 unsigned int order
= hns3_page_order(ring
);
3364 if (ring
->page_pool
) {
3365 p
= page_pool_dev_alloc_frag(ring
->page_pool
,
3367 hns3_buf_size(ring
));
3372 cb
->buf
= page_address(p
);
3373 cb
->dma
= page_pool_get_dma_addr(p
);
3374 cb
->type
= DESC_TYPE_PP_FRAG
;
3379 p
= dev_alloc_pages(order
);
3384 cb
->page_offset
= 0;
3386 cb
->buf
= page_address(p
);
3387 cb
->length
= hns3_page_size(ring
);
3388 cb
->type
= DESC_TYPE_PAGE
;
3389 page_ref_add(p
, USHRT_MAX
- 1);
3390 cb
->pagecnt_bias
= USHRT_MAX
;
3395 static void hns3_free_buffer(struct hns3_enet_ring
*ring
,
3396 struct hns3_desc_cb
*cb
, int budget
)
3398 if (cb
->type
& (DESC_TYPE_SKB
| DESC_TYPE_BOUNCE_HEAD
|
3399 DESC_TYPE_BOUNCE_ALL
| DESC_TYPE_SGL_SKB
))
3400 napi_consume_skb(cb
->priv
, budget
);
3401 else if (!HNAE3_IS_TX_RING(ring
)) {
3402 if (cb
->type
& DESC_TYPE_PAGE
&& cb
->pagecnt_bias
)
3403 __page_frag_cache_drain(cb
->priv
, cb
->pagecnt_bias
);
3404 else if (cb
->type
& DESC_TYPE_PP_FRAG
)
3405 page_pool_put_full_page(ring
->page_pool
, cb
->priv
,
3408 memset(cb
, 0, sizeof(*cb
));
3411 static int hns3_map_buffer(struct hns3_enet_ring
*ring
, struct hns3_desc_cb
*cb
)
3413 cb
->dma
= dma_map_page(ring_to_dev(ring
), cb
->priv
, 0,
3414 cb
->length
, ring_to_dma_dir(ring
));
3416 if (unlikely(dma_mapping_error(ring_to_dev(ring
), cb
->dma
)))
3422 static void hns3_unmap_buffer(struct hns3_enet_ring
*ring
,
3423 struct hns3_desc_cb
*cb
)
3425 if (cb
->type
& (DESC_TYPE_SKB
| DESC_TYPE_FRAGLIST_SKB
))
3426 dma_unmap_single(ring_to_dev(ring
), cb
->dma
, cb
->length
,
3427 ring_to_dma_dir(ring
));
3428 else if ((cb
->type
& DESC_TYPE_PAGE
) && cb
->length
)
3429 dma_unmap_page(ring_to_dev(ring
), cb
->dma
, cb
->length
,
3430 ring_to_dma_dir(ring
));
3431 else if (cb
->type
& (DESC_TYPE_BOUNCE_ALL
| DESC_TYPE_BOUNCE_HEAD
|
3433 hns3_tx_spare_reclaim_cb(ring
, cb
);
3436 static void hns3_buffer_detach(struct hns3_enet_ring
*ring
, int i
)
3438 hns3_unmap_buffer(ring
, &ring
->desc_cb
[i
]);
3439 ring
->desc
[i
].addr
= 0;
3440 ring
->desc_cb
[i
].refill
= 0;
3443 static void hns3_free_buffer_detach(struct hns3_enet_ring
*ring
, int i
,
3446 struct hns3_desc_cb
*cb
= &ring
->desc_cb
[i
];
3448 if (!ring
->desc_cb
[i
].dma
)
3451 hns3_buffer_detach(ring
, i
);
3452 hns3_free_buffer(ring
, cb
, budget
);
3455 static void hns3_free_buffers(struct hns3_enet_ring
*ring
)
3459 for (i
= 0; i
< ring
->desc_num
; i
++)
3460 hns3_free_buffer_detach(ring
, i
, 0);
3463 /* free desc along with its attached buffer */
3464 static void hns3_free_desc(struct hns3_enet_ring
*ring
)
3466 int size
= ring
->desc_num
* sizeof(ring
->desc
[0]);
3468 hns3_free_buffers(ring
);
3471 dma_free_coherent(ring_to_dev(ring
), size
,
3472 ring
->desc
, ring
->desc_dma_addr
);
3477 static int hns3_alloc_desc(struct hns3_enet_ring
*ring
)
3479 int size
= ring
->desc_num
* sizeof(ring
->desc
[0]);
3481 ring
->desc
= dma_alloc_coherent(ring_to_dev(ring
), size
,
3482 &ring
->desc_dma_addr
, GFP_KERNEL
);
3489 static int hns3_alloc_and_map_buffer(struct hns3_enet_ring
*ring
,
3490 struct hns3_desc_cb
*cb
)
3494 ret
= hns3_alloc_buffer(ring
, cb
);
3495 if (ret
|| ring
->page_pool
)
3498 ret
= hns3_map_buffer(ring
, cb
);
3505 hns3_free_buffer(ring
, cb
, 0);
3510 static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring
*ring
, int i
)
3512 int ret
= hns3_alloc_and_map_buffer(ring
, &ring
->desc_cb
[i
]);
3517 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
+
3518 ring
->desc_cb
[i
].page_offset
);
3519 ring
->desc_cb
[i
].refill
= 1;
3524 /* Allocate memory for raw pkg, and map with dma */
3525 static int hns3_alloc_ring_buffers(struct hns3_enet_ring
*ring
)
3529 for (i
= 0; i
< ring
->desc_num
; i
++) {
3530 ret
= hns3_alloc_and_attach_buffer(ring
, i
);
3532 goto out_buffer_fail
;
3538 for (j
= i
- 1; j
>= 0; j
--)
3539 hns3_free_buffer_detach(ring
, j
, 0);
3543 /* detach a in-used buffer and replace with a reserved one */
3544 static void hns3_replace_buffer(struct hns3_enet_ring
*ring
, int i
,
3545 struct hns3_desc_cb
*res_cb
)
3547 hns3_unmap_buffer(ring
, &ring
->desc_cb
[i
]);
3548 ring
->desc_cb
[i
] = *res_cb
;
3549 ring
->desc_cb
[i
].refill
= 1;
3550 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
+
3551 ring
->desc_cb
[i
].page_offset
);
3552 ring
->desc
[i
].rx
.bd_base_info
= 0;
3555 static void hns3_reuse_buffer(struct hns3_enet_ring
*ring
, int i
)
3557 ring
->desc_cb
[i
].reuse_flag
= 0;
3558 ring
->desc_cb
[i
].refill
= 1;
3559 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
+
3560 ring
->desc_cb
[i
].page_offset
);
3561 ring
->desc
[i
].rx
.bd_base_info
= 0;
3563 dma_sync_single_for_device(ring_to_dev(ring
),
3564 ring
->desc_cb
[i
].dma
+ ring
->desc_cb
[i
].page_offset
,
3565 hns3_buf_size(ring
),
3569 static bool hns3_nic_reclaim_desc(struct hns3_enet_ring
*ring
,
3570 int *bytes
, int *pkts
, int budget
)
3572 /* This smp_load_acquire() pairs with smp_store_release() in
3573 * hns3_tx_doorbell().
3575 int ltu
= smp_load_acquire(&ring
->last_to_use
);
3576 int ntc
= ring
->next_to_clean
;
3577 struct hns3_desc_cb
*desc_cb
;
3578 bool reclaimed
= false;
3579 struct hns3_desc
*desc
;
3581 while (ltu
!= ntc
) {
3582 desc
= &ring
->desc
[ntc
];
3584 if (le16_to_cpu(desc
->tx
.bdtp_fe_sc_vld_ra_ri
) &
3585 BIT(HNS3_TXD_VLD_B
))
3588 desc_cb
= &ring
->desc_cb
[ntc
];
3590 if (desc_cb
->type
& (DESC_TYPE_SKB
| DESC_TYPE_BOUNCE_ALL
|
3591 DESC_TYPE_BOUNCE_HEAD
|
3592 DESC_TYPE_SGL_SKB
)) {
3594 (*bytes
) += desc_cb
->send_bytes
;
3597 /* desc_cb will be cleaned, after hnae3_free_buffer_detach */
3598 hns3_free_buffer_detach(ring
, ntc
, budget
);
3600 if (++ntc
== ring
->desc_num
)
3603 /* Issue prefetch for next Tx descriptor */
3604 prefetch(&ring
->desc_cb
[ntc
]);
3608 if (unlikely(!reclaimed
))
3611 /* This smp_store_release() pairs with smp_load_acquire() in
3612 * ring_space called by hns3_nic_net_xmit.
3614 smp_store_release(&ring
->next_to_clean
, ntc
);
3616 hns3_tx_spare_update(ring
);
3621 void hns3_clean_tx_ring(struct hns3_enet_ring
*ring
, int budget
)
3623 struct net_device
*netdev
= ring_to_netdev(ring
);
3624 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
3625 struct netdev_queue
*dev_queue
;
3631 if (unlikely(!hns3_nic_reclaim_desc(ring
, &bytes
, &pkts
, budget
)))
3634 ring
->tqp_vector
->tx_group
.total_bytes
+= bytes
;
3635 ring
->tqp_vector
->tx_group
.total_packets
+= pkts
;
3637 u64_stats_update_begin(&ring
->syncp
);
3638 ring
->stats
.tx_bytes
+= bytes
;
3639 ring
->stats
.tx_pkts
+= pkts
;
3640 u64_stats_update_end(&ring
->syncp
);
3642 dev_queue
= netdev_get_tx_queue(netdev
, ring
->tqp
->tqp_index
);
3643 netdev_tx_completed_queue(dev_queue
, pkts
, bytes
);
3645 if (unlikely(netif_carrier_ok(netdev
) &&
3646 ring_space(ring
) > HNS3_MAX_TSO_BD_NUM
)) {
3647 /* Make sure that anybody stopping the queue after this
3648 * sees the new next_to_clean.
3651 if (netif_tx_queue_stopped(dev_queue
) &&
3652 !test_bit(HNS3_NIC_STATE_DOWN
, &priv
->state
)) {
3653 netif_tx_wake_queue(dev_queue
);
3654 ring
->stats
.restart_queue
++;
3659 static int hns3_desc_unused(struct hns3_enet_ring
*ring
)
3661 int ntc
= ring
->next_to_clean
;
3662 int ntu
= ring
->next_to_use
;
3664 if (unlikely(ntc
== ntu
&& !ring
->desc_cb
[ntc
].refill
))
3665 return ring
->desc_num
;
3667 return ((ntc
>= ntu
) ? 0 : ring
->desc_num
) + ntc
- ntu
;
3670 /* Return true if there is any allocation failure */
3671 static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring
*ring
,
3674 struct hns3_desc_cb
*desc_cb
;
3675 struct hns3_desc_cb res_cbs
;
3678 for (i
= 0; i
< cleand_count
; i
++) {
3679 desc_cb
= &ring
->desc_cb
[ring
->next_to_use
];
3680 if (desc_cb
->reuse_flag
) {
3681 hns3_ring_stats_update(ring
, reuse_pg_cnt
);
3683 hns3_reuse_buffer(ring
, ring
->next_to_use
);
3685 ret
= hns3_alloc_and_map_buffer(ring
, &res_cbs
);
3687 hns3_ring_stats_update(ring
, sw_err_cnt
);
3689 hns3_rl_err(ring_to_netdev(ring
),
3690 "alloc rx buffer failed: %d\n",
3693 writel(i
, ring
->tqp
->io_base
+
3694 HNS3_RING_RX_RING_HEAD_REG
);
3697 hns3_replace_buffer(ring
, ring
->next_to_use
, &res_cbs
);
3699 hns3_ring_stats_update(ring
, non_reuse_pg
);
3702 ring_ptr_move_fw(ring
, next_to_use
);
3705 writel(i
, ring
->tqp
->io_base
+ HNS3_RING_RX_RING_HEAD_REG
);
3709 static bool hns3_can_reuse_page(struct hns3_desc_cb
*cb
)
3711 return page_count(cb
->priv
) == cb
->pagecnt_bias
;
3714 static int hns3_handle_rx_copybreak(struct sk_buff
*skb
, int i
,
3715 struct hns3_enet_ring
*ring
,
3717 struct hns3_desc_cb
*desc_cb
)
3719 struct hns3_desc
*desc
= &ring
->desc
[ring
->next_to_clean
];
3720 u32 frag_offset
= desc_cb
->page_offset
+ pull_len
;
3721 int size
= le16_to_cpu(desc
->rx
.size
);
3722 u32 frag_size
= size
- pull_len
;
3723 void *frag
= napi_alloc_frag(frag_size
);
3725 if (unlikely(!frag
)) {
3726 hns3_ring_stats_update(ring
, frag_alloc_err
);
3728 hns3_rl_err(ring_to_netdev(ring
),
3729 "failed to allocate rx frag\n");
3733 desc_cb
->reuse_flag
= 1;
3734 memcpy(frag
, desc_cb
->buf
+ frag_offset
, frag_size
);
3735 skb_add_rx_frag(skb
, i
, virt_to_page(frag
),
3736 offset_in_page(frag
), frag_size
, frag_size
);
3738 hns3_ring_stats_update(ring
, frag_alloc
);
3742 static void hns3_nic_reuse_page(struct sk_buff
*skb
, int i
,
3743 struct hns3_enet_ring
*ring
, int pull_len
,
3744 struct hns3_desc_cb
*desc_cb
)
3746 struct hns3_desc
*desc
= &ring
->desc
[ring
->next_to_clean
];
3747 u32 frag_offset
= desc_cb
->page_offset
+ pull_len
;
3748 int size
= le16_to_cpu(desc
->rx
.size
);
3749 u32 truesize
= hns3_buf_size(ring
);
3750 u32 frag_size
= size
- pull_len
;
3754 if (ring
->page_pool
) {
3755 skb_add_rx_frag(skb
, i
, desc_cb
->priv
, frag_offset
,
3756 frag_size
, truesize
);
3760 /* Avoid re-using remote or pfmem page */
3761 if (unlikely(!dev_page_is_reusable(desc_cb
->priv
)))
3764 reused
= hns3_can_reuse_page(desc_cb
);
3766 /* Rx page can be reused when:
3767 * 1. Rx page is only owned by the driver when page_offset
3768 * is zero, which means 0 @ truesize will be used by
3769 * stack after skb_add_rx_frag() is called, and the rest
3770 * of rx page can be reused by driver.
3772 * 2. Rx page is only owned by the driver when page_offset
3773 * is non-zero, which means page_offset @ truesize will
3774 * be used by stack after skb_add_rx_frag() is called,
3775 * and 0 @ truesize can be reused by driver.
3777 if ((!desc_cb
->page_offset
&& reused
) ||
3778 ((desc_cb
->page_offset
+ truesize
+ truesize
) <=
3779 hns3_page_size(ring
) && desc_cb
->page_offset
)) {
3780 desc_cb
->page_offset
+= truesize
;
3781 desc_cb
->reuse_flag
= 1;
3782 } else if (desc_cb
->page_offset
&& reused
) {
3783 desc_cb
->page_offset
= 0;
3784 desc_cb
->reuse_flag
= 1;
3785 } else if (frag_size
<= ring
->rx_copybreak
) {
3786 ret
= hns3_handle_rx_copybreak(skb
, i
, ring
, pull_len
, desc_cb
);
3792 desc_cb
->pagecnt_bias
--;
3794 if (unlikely(!desc_cb
->pagecnt_bias
)) {
3795 page_ref_add(desc_cb
->priv
, USHRT_MAX
);
3796 desc_cb
->pagecnt_bias
= USHRT_MAX
;
3799 skb_add_rx_frag(skb
, i
, desc_cb
->priv
, frag_offset
,
3800 frag_size
, truesize
);
3802 if (unlikely(!desc_cb
->reuse_flag
))
3803 __page_frag_cache_drain(desc_cb
->priv
, desc_cb
->pagecnt_bias
);
3806 static int hns3_gro_complete(struct sk_buff
*skb
, u32 l234info
)
3808 __be16 type
= skb
->protocol
;
3812 while (eth_type_vlan(type
)) {
3813 struct vlan_hdr
*vh
;
3815 if ((depth
+ VLAN_HLEN
) > skb_headlen(skb
))
3818 vh
= (struct vlan_hdr
*)(skb
->data
+ depth
);
3819 type
= vh
->h_vlan_encapsulated_proto
;
3823 skb_set_network_header(skb
, depth
);
3825 if (type
== htons(ETH_P_IP
)) {
3826 const struct iphdr
*iph
= ip_hdr(skb
);
3828 depth
+= sizeof(struct iphdr
);
3829 skb_set_transport_header(skb
, depth
);
3831 th
->check
= ~tcp_v4_check(skb
->len
- depth
, iph
->saddr
,
3833 } else if (type
== htons(ETH_P_IPV6
)) {
3834 const struct ipv6hdr
*iph
= ipv6_hdr(skb
);
3836 depth
+= sizeof(struct ipv6hdr
);
3837 skb_set_transport_header(skb
, depth
);
3839 th
->check
= ~tcp_v6_check(skb
->len
- depth
, &iph
->saddr
,
3842 hns3_rl_err(skb
->dev
,
3843 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
3844 be16_to_cpu(type
), depth
);
3848 skb_shinfo(skb
)->gso_segs
= NAPI_GRO_CB(skb
)->count
;
3850 skb_shinfo(skb
)->gso_type
|= SKB_GSO_TCP_ECN
;
3852 if (l234info
& BIT(HNS3_RXD_GRO_FIXID_B
))
3853 skb_shinfo(skb
)->gso_type
|= SKB_GSO_TCP_FIXEDID
;
3855 skb
->csum_start
= (unsigned char *)th
- skb
->head
;
3856 skb
->csum_offset
= offsetof(struct tcphdr
, check
);
3857 skb
->ip_summed
= CHECKSUM_PARTIAL
;
3859 trace_hns3_gro(skb
);
3864 static void hns3_checksum_complete(struct hns3_enet_ring
*ring
,
3865 struct sk_buff
*skb
, u32 ptype
, u16 csum
)
3867 if (ptype
== HNS3_INVALID_PTYPE
||
3868 hns3_rx_ptype_tbl
[ptype
].ip_summed
!= CHECKSUM_COMPLETE
)
3871 hns3_ring_stats_update(ring
, csum_complete
);
3872 skb
->ip_summed
= CHECKSUM_COMPLETE
;
3873 skb
->csum
= csum_unfold((__force __sum16
)csum
);
3876 static void hns3_rx_handle_csum(struct sk_buff
*skb
, u32 l234info
,
3877 u32 ol_info
, u32 ptype
)
3879 int l3_type
, l4_type
;
3882 if (ptype
!= HNS3_INVALID_PTYPE
) {
3883 skb
->csum_level
= hns3_rx_ptype_tbl
[ptype
].csum_level
;
3884 skb
->ip_summed
= hns3_rx_ptype_tbl
[ptype
].ip_summed
;
3889 ol4_type
= hnae3_get_field(ol_info
, HNS3_RXD_OL4ID_M
,
3892 case HNS3_OL4_TYPE_MAC_IN_UDP
:
3893 case HNS3_OL4_TYPE_NVGRE
:
3894 skb
->csum_level
= 1;
3896 case HNS3_OL4_TYPE_NO_TUN
:
3897 l3_type
= hnae3_get_field(l234info
, HNS3_RXD_L3ID_M
,
3899 l4_type
= hnae3_get_field(l234info
, HNS3_RXD_L4ID_M
,
3901 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
3902 if ((l3_type
== HNS3_L3_TYPE_IPV4
||
3903 l3_type
== HNS3_L3_TYPE_IPV6
) &&
3904 (l4_type
== HNS3_L4_TYPE_UDP
||
3905 l4_type
== HNS3_L4_TYPE_TCP
||
3906 l4_type
== HNS3_L4_TYPE_SCTP
))
3907 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
3914 static void hns3_rx_checksum(struct hns3_enet_ring
*ring
, struct sk_buff
*skb
,
3915 u32 l234info
, u32 bd_base_info
, u32 ol_info
,
3918 struct net_device
*netdev
= ring_to_netdev(ring
);
3919 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
3920 u32 ptype
= HNS3_INVALID_PTYPE
;
3922 skb
->ip_summed
= CHECKSUM_NONE
;
3924 skb_checksum_none_assert(skb
);
3926 if (!(netdev
->features
& NETIF_F_RXCSUM
))
3929 if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE
, &priv
->state
))
3930 ptype
= hnae3_get_field(ol_info
, HNS3_RXD_PTYPE_M
,
3933 hns3_checksum_complete(ring
, skb
, ptype
, csum
);
3935 /* check if hardware has done checksum */
3936 if (!(bd_base_info
& BIT(HNS3_RXD_L3L4P_B
)))
3939 if (unlikely(l234info
& (BIT(HNS3_RXD_L3E_B
) | BIT(HNS3_RXD_L4E_B
) |
3940 BIT(HNS3_RXD_OL3E_B
) |
3941 BIT(HNS3_RXD_OL4E_B
)))) {
3942 skb
->ip_summed
= CHECKSUM_NONE
;
3943 hns3_ring_stats_update(ring
, l3l4_csum_err
);
3948 hns3_rx_handle_csum(skb
, l234info
, ol_info
, ptype
);
3951 static void hns3_rx_skb(struct hns3_enet_ring
*ring
, struct sk_buff
*skb
)
3953 if (skb_has_frag_list(skb
))
3954 napi_gro_flush(&ring
->tqp_vector
->napi
, false);
3956 napi_gro_receive(&ring
->tqp_vector
->napi
, skb
);
3959 static bool hns3_parse_vlan_tag(struct hns3_enet_ring
*ring
,
3960 struct hns3_desc
*desc
, u32 l234info
,
3963 struct hnae3_handle
*handle
= ring
->tqp
->handle
;
3964 struct pci_dev
*pdev
= ring
->tqp
->handle
->pdev
;
3965 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
3967 if (unlikely(ae_dev
->dev_version
< HNAE3_DEVICE_VERSION_V2
)) {
3968 *vlan_tag
= le16_to_cpu(desc
->rx
.ot_vlan_tag
);
3969 if (!(*vlan_tag
& VLAN_VID_MASK
))
3970 *vlan_tag
= le16_to_cpu(desc
->rx
.vlan_tag
);
3972 return (*vlan_tag
!= 0);
3975 #define HNS3_STRP_OUTER_VLAN 0x1
3976 #define HNS3_STRP_INNER_VLAN 0x2
3977 #define HNS3_STRP_BOTH 0x3
3979 /* Hardware always insert VLAN tag into RX descriptor when
3980 * remove the tag from packet, driver needs to determine
3981 * reporting which tag to stack.
3983 switch (hnae3_get_field(l234info
, HNS3_RXD_STRP_TAGP_M
,
3984 HNS3_RXD_STRP_TAGP_S
)) {
3985 case HNS3_STRP_OUTER_VLAN
:
3986 if (handle
->port_base_vlan_state
!=
3987 HNAE3_PORT_BASE_VLAN_DISABLE
)
3990 *vlan_tag
= le16_to_cpu(desc
->rx
.ot_vlan_tag
);
3992 case HNS3_STRP_INNER_VLAN
:
3993 if (handle
->port_base_vlan_state
!=
3994 HNAE3_PORT_BASE_VLAN_DISABLE
)
3997 *vlan_tag
= le16_to_cpu(desc
->rx
.vlan_tag
);
3999 case HNS3_STRP_BOTH
:
4000 if (handle
->port_base_vlan_state
==
4001 HNAE3_PORT_BASE_VLAN_DISABLE
)
4002 *vlan_tag
= le16_to_cpu(desc
->rx
.ot_vlan_tag
);
4004 *vlan_tag
= le16_to_cpu(desc
->rx
.vlan_tag
);
4012 static void hns3_rx_ring_move_fw(struct hns3_enet_ring
*ring
)
4014 ring
->desc
[ring
->next_to_clean
].rx
.bd_base_info
&=
4015 cpu_to_le32(~BIT(HNS3_RXD_VLD_B
));
4016 ring
->desc_cb
[ring
->next_to_clean
].refill
= 0;
4017 ring
->next_to_clean
+= 1;
4019 if (unlikely(ring
->next_to_clean
== ring
->desc_num
))
4020 ring
->next_to_clean
= 0;
4023 static int hns3_alloc_skb(struct hns3_enet_ring
*ring
, unsigned int length
,
4026 struct hns3_desc_cb
*desc_cb
= &ring
->desc_cb
[ring
->next_to_clean
];
4027 struct net_device
*netdev
= ring_to_netdev(ring
);
4028 struct sk_buff
*skb
;
4030 ring
->skb
= napi_alloc_skb(&ring
->tqp_vector
->napi
, HNS3_RX_HEAD_SIZE
);
4032 if (unlikely(!skb
)) {
4033 hns3_rl_err(netdev
, "alloc rx skb fail\n");
4034 hns3_ring_stats_update(ring
, sw_err_cnt
);
4039 trace_hns3_rx_desc(ring
);
4040 prefetchw(skb
->data
);
4042 ring
->pending_buf
= 1;
4044 ring
->tail_skb
= NULL
;
4045 if (length
<= HNS3_RX_HEAD_SIZE
) {
4046 memcpy(__skb_put(skb
, length
), va
, ALIGN(length
, sizeof(long)));
4048 /* We can reuse buffer as-is, just make sure it is reusable */
4049 if (dev_page_is_reusable(desc_cb
->priv
))
4050 desc_cb
->reuse_flag
= 1;
4051 else if (desc_cb
->type
& DESC_TYPE_PP_FRAG
)
4052 page_pool_put_full_page(ring
->page_pool
, desc_cb
->priv
,
4054 else /* This page cannot be reused so discard it */
4055 __page_frag_cache_drain(desc_cb
->priv
,
4056 desc_cb
->pagecnt_bias
);
4058 hns3_rx_ring_move_fw(ring
);
4062 if (ring
->page_pool
)
4063 skb_mark_for_recycle(skb
);
4065 hns3_ring_stats_update(ring
, seg_pkt_cnt
);
4067 ring
->pull_len
= eth_get_headlen(netdev
, va
, HNS3_RX_HEAD_SIZE
);
4068 __skb_put(skb
, ring
->pull_len
);
4069 hns3_nic_reuse_page(skb
, ring
->frag_num
++, ring
, ring
->pull_len
,
4071 hns3_rx_ring_move_fw(ring
);
static int hns3_add_frag(struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = ring->skb;
	struct sk_buff *head_skb = skb;
	struct sk_buff *new_skb;
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc *desc;
	u32 bd_base_info;

	do {
		desc = &ring->desc[ring->next_to_clean];
		desc_cb = &ring->desc_cb[ring->next_to_clean];
		bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
		/* make sure HW write desc complete */
		dma_rmb();
		if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
			return -ENXIO;

		if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
			new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0);
			if (unlikely(!new_skb)) {
				hns3_rl_err(ring_to_netdev(ring),
					    "alloc rx fraglist skb fail\n");
				return -ENXIO;
			}

			if (ring->page_pool)
				skb_mark_for_recycle(new_skb);

			ring->frag_num = 0;

			if (ring->tail_skb) {
				ring->tail_skb->next = new_skb;
				ring->tail_skb = new_skb;
			} else {
				skb_shinfo(skb)->frag_list = new_skb;
				ring->tail_skb = new_skb;
			}
		}

		if (ring->tail_skb) {
			head_skb->truesize += hns3_buf_size(ring);
			head_skb->data_len += le16_to_cpu(desc->rx.size);
			head_skb->len += le16_to_cpu(desc->rx.size);
			skb = ring->tail_skb;
		}

		dma_sync_single_for_cpu(ring_to_dev(ring),
					desc_cb->dma + desc_cb->page_offset,
					hns3_buf_size(ring),
					DMA_FROM_DEVICE);

		hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
		trace_hns3_rx_desc(ring);
		hns3_rx_ring_move_fw(ring);
		ring->pending_buf++;
	} while (!(bd_base_info & BIT(HNS3_RXD_FE_B)));

	return 0;
}

static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
				     struct sk_buff *skb, u32 l234info,
				     u32 bd_base_info, u32 ol_info, u16 csum)
{
	struct net_device *netdev = ring_to_netdev(ring);
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	u32 l3_type;

	skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
						    HNS3_RXD_GRO_SIZE_M,
						    HNS3_RXD_GRO_SIZE_S);
	/* if there is no HW GRO, do not set gro params */
	if (!skb_shinfo(skb)->gso_size) {
		hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info,
				 csum);
		return 0;
	}

	NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info,
						  HNS3_RXD_GRO_COUNT_M,
						  HNS3_RXD_GRO_COUNT_S);

	if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
		u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
					    HNS3_RXD_PTYPE_S);

		l3_type = hns3_rx_ptype_tbl[ptype].l3_type;
	} else {
		l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
					  HNS3_RXD_L3ID_S);
	}

	if (l3_type == HNS3_L3_TYPE_IPV4)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	else if (l3_type == HNS3_L3_TYPE_IPV6)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	else
		return -EFAULT;

	return hns3_gro_complete(skb, l234info);
}
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
				     struct sk_buff *skb, u32 rss_hash,
				     u32 l234info, u32 ol_info)
{
	enum pkt_hash_types rss_type = PKT_HASH_TYPE_NONE;
	struct net_device *netdev = ring_to_netdev(ring);
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
		u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
					    HNS3_RXD_PTYPE_S);

		rss_type = hns3_rx_ptype_tbl[ptype].hash_type;
	} else {
		int l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
					      HNS3_RXD_L3ID_S);
		int l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
					      HNS3_RXD_L4ID_S);

		if (l3_type == HNS3_L3_TYPE_IPV4 ||
		    l3_type == HNS3_L3_TYPE_IPV6) {
			if (l4_type == HNS3_L4_TYPE_UDP ||
			    l4_type == HNS3_L4_TYPE_TCP ||
			    l4_type == HNS3_L4_TYPE_SCTP)
				rss_type = PKT_HASH_TYPE_L4;
			else if (l4_type == HNS3_L4_TYPE_IGMP ||
				 l4_type == HNS3_L4_TYPE_ICMP)
				rss_type = PKT_HASH_TYPE_L3;
		}
	}

	skb_set_hash(skb, rss_hash, rss_type);
}

static void hns3_handle_rx_ts_info(struct net_device *netdev,
				   struct hns3_desc *desc, struct sk_buff *skb,
				   u32 bd_base_info)
{
	if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) {
		struct hnae3_handle *h = hns3_get_handle(netdev);
		u32 nsec = le32_to_cpu(desc->ts_nsec);
		u32 sec = le32_to_cpu(desc->ts_sec);

		if (h->ae_algo->ops->get_rx_hwts)
			h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec);
	}
}

static void hns3_handle_rx_vlan_tag(struct hns3_enet_ring *ring,
				    struct hns3_desc *desc, struct sk_buff *skb,
				    u32 l234info)
{
	struct net_device *netdev = ring_to_netdev(ring);

	/* Based on hw strategy, the tag offloaded will be stored at
	 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
	 * in one layer tag case.
	 */
	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		u16 vlan_tag;

		if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       vlan_tag);
	}
}
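
/* Parse the last BD of the packet: hardware timestamp, VLAN tag, error
 * bits, checksum/GRO info and RSS hash, updating the ring statistics.
 */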
static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
	struct net_device *netdev = ring_to_netdev(ring);
	enum hns3_pkt_l2t_type l2_frame_type;
	u32 bd_base_info, l234info, ol_info;
	struct hns3_desc *desc;
	unsigned int len;
	int pre_ntc, ret;
	u16 csum;

	/* bdinfo handled below is only valid on the last BD of the
	 * current packet, and ring->next_to_clean indicates the first
	 * descriptor of next packet, so need - 1 below.
	 */
	pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
					(ring->desc_num - 1);
	desc = &ring->desc[pre_ntc];
	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
	l234info = le32_to_cpu(desc->rx.l234_info);
	ol_info = le32_to_cpu(desc->rx.ol_info);
	csum = le16_to_cpu(desc->csum);

	hns3_handle_rx_ts_info(netdev, desc, skb, bd_base_info);

	hns3_handle_rx_vlan_tag(ring, desc, skb, l234info);

	if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
				  BIT(HNS3_RXD_L2E_B))))) {
		u64_stats_update_begin(&ring->syncp);
		if (l234info & BIT(HNS3_RXD_L2E_B))
			ring->stats.l2_err++;
		else
			ring->stats.err_pkt_len++;
		u64_stats_update_end(&ring->syncp);

		return -EFAULT;
	}

	len = skb->len;

	/* Do update ip stack process */
	skb->protocol = eth_type_trans(skb, netdev);

	/* This is needed in order to enable forwarding support */
	ret = hns3_set_gro_and_checksum(ring, skb, l234info,
					bd_base_info, ol_info, csum);
	if (unlikely(ret)) {
		hns3_ring_stats_update(ring, rx_err_cnt);
		return ret;
	}

	l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
					HNS3_RXD_DMAC_S);

	u64_stats_update_begin(&ring->syncp);
	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += len;

	if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
		ring->stats.rx_multicast++;

	u64_stats_update_end(&ring->syncp);

	ring->tqp_vector->rx_group.total_bytes += len;

	hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash),
				 l234info, ol_info);
	return 0;
}
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = ring->skb;
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc *desc;
	unsigned int length;
	u32 bd_base_info;
	int ret;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	if (!skb) {
		bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
		/* Check valid BD */
		if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
			return -ENXIO;

		dma_rmb();
		length = le16_to_cpu(desc->rx.size);

		ring->va = desc_cb->buf + desc_cb->page_offset;

		dma_sync_single_for_cpu(ring_to_dev(ring),
					desc_cb->dma + desc_cb->page_offset,
					hns3_buf_size(ring),
					DMA_FROM_DEVICE);

		/* Prefetch first cache line of first page.
		 * Idea is to cache few bytes of the header of the packet.
		 * Our L1 Cache line size is 64B so need to prefetch twice to make
		 * it 128B. But in actual we can have greater size of caches with
		 * 128B Level 1 cache lines. In such a case, single fetch would
		 * suffice to cache in the relevant part of the header.
		 */
		net_prefetch(ring->va);

		ret = hns3_alloc_skb(ring, length, ring->va);
		skb = ring->skb;

		if (ret < 0) /* alloc buffer fail */
			return ret;
		if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */
			ret = hns3_add_frag(ring);
			if (ret)
				return ret;
		}
	} else {
		ret = hns3_add_frag(ring);
		if (ret)
			return ret;
	}

	/* As the head data may be changed when GRO enable, copy
	 * the head data in after other data rx completed
	 */
	if (skb->len > HNS3_RX_HEAD_SIZE)
		memcpy(skb->data, ring->va,
		       ALIGN(ring->pull_len, sizeof(long)));

	ret = hns3_handle_bdinfo(ring, skb);
	if (unlikely(ret)) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	skb_record_rx_queue(skb, ring->tqp->tqp_index);
	return 0;
}
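
/* Poll up to @budget received packets from the ring, handing each
 * completed skb to @rx_fn and refilling rx buffers in batches.
 */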
int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
		       void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	int unused_count = hns3_desc_unused(ring);
	bool failure = false;
	int recv_pkts = 0;
	int err;

	unused_count -= ring->pending_buf;

	while (recv_pkts < budget) {
		/* Reuse or realloc buffers */
		if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			failure = failure ||
				hns3_nic_alloc_rx_buffers(ring, unused_count);
			unused_count = 0;
		}

		err = hns3_handle_rx_bd(ring);
		/* Do not get FE for the packet or failed to alloc skb */
		if (unlikely(!ring->skb || err == -ENXIO)) {
			goto out;
		} else if (likely(!err)) {
			rx_fn(ring, ring->skb);
			recv_pkts++;
		}

		unused_count += ring->pending_buf;
		ring->skb = NULL;
		ring->pending_buf = 0;
	}

out:
	/* sync head pointer before exiting, since hardware will calculate
	 * FBD number with head pointer
	 */
	if (unused_count > 0)
		failure = failure ||
			hns3_nic_alloc_rx_buffers(ring, unused_count);

	return failure ? budget : recv_pkts;
}
static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
{
	struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
	struct dim_sample sample = {};

	if (!rx_group->coal.adapt_enable)
		return;

	dim_update_sample(tqp_vector->event_cnt, rx_group->total_packets,
			  rx_group->total_bytes, &sample);
	net_dim(&rx_group->dim, sample);
}

static void hns3_update_tx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
{
	struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
	struct dim_sample sample = {};

	if (!tx_group->coal.adapt_enable)
		return;

	dim_update_sample(tqp_vector->event_cnt, tx_group->total_packets,
			  tx_group->total_bytes, &sample);
	net_dim(&tx_group->dim, sample);
}
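
/* NAPI poll callback shared by all TQP vectors: clean the tx rings first,
 * then the rx rings with a per-ring budget, and re-enable the vector
 * interrupt once all work is done.
 */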
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
	struct hns3_nic_priv *priv = netdev_priv(napi->dev);
	struct hns3_enet_ring *ring;
	int rx_pkt_total = 0;

	struct hns3_enet_tqp_vector *tqp_vector =
		container_of(napi, struct hns3_enet_tqp_vector, napi);
	bool clean_complete = true;
	int rx_budget = budget;

	if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
		napi_complete(napi);
		return 0;
	}

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	hns3_for_each_ring(ring, tqp_vector->tx_group)
		hns3_clean_tx_ring(ring, budget);

	/* make sure rx ring budget not smaller than 1 */
	if (tqp_vector->num_tqps > 1)
		rx_budget = max(budget / tqp_vector->num_tqps, 1);

	hns3_for_each_ring(ring, tqp_vector->rx_group) {
		int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
						    hns3_rx_skb);
		if (rx_cleaned >= rx_budget)
			clean_complete = false;

		rx_pkt_total += rx_cleaned;
	}

	tqp_vector->rx_group.total_packets += rx_pkt_total;

	if (!clean_complete)
		return budget;

	if (napi_complete(napi) &&
	    likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
		hns3_update_rx_int_coalesce(tqp_vector);
		hns3_update_tx_int_coalesce(tqp_vector);

		hns3_mask_vector_irq(tqp_vector, 1);
	}

	return rx_pkt_total;
}
static int hns3_create_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
				  struct hnae3_ring_chain_node **head,
				  bool is_tx)
{
	u32 bit_value = is_tx ? HNAE3_RING_TYPE_TX : HNAE3_RING_TYPE_RX;
	u32 field_value = is_tx ? HNAE3_RING_GL_TX : HNAE3_RING_GL_RX;
	struct hnae3_ring_chain_node *cur_chain = *head;
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *chain;
	struct hns3_enet_ring *ring;

	ring = is_tx ? tqp_vector->tx_group.ring : tqp_vector->rx_group.ring;

	if (cur_chain) {
		while (cur_chain->next)
			cur_chain = cur_chain->next;
	}

	while (ring) {
		chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
		if (!chain)
			return -ENOMEM;
		if (cur_chain)
			cur_chain->next = chain;
		else
			*head = chain;
		chain->tqp_index = ring->tqp->tqp_index;
		hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
			      bit_value);
		hnae3_set_field(chain->int_gl_idx,
				HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, field_value);

		cur_chain = chain;

		ring = ring->next;
	}

	return 0;
}
static struct hnae3_ring_chain_node *
hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *cur_chain = NULL;
	struct hnae3_ring_chain_node *chain;

	if (hns3_create_ring_chain(tqp_vector, &cur_chain, true))
		goto err_free_chain;

	if (hns3_create_ring_chain(tqp_vector, &cur_chain, false))
		goto err_free_chain;

	return cur_chain;

err_free_chain:
	while (cur_chain) {
		chain = cur_chain->next;
		devm_kfree(&pdev->dev, cur_chain);
		cur_chain = chain;
	}

	return NULL;
}

static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
					struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head;

	while (chain) {
		chain_tmp = chain->next;
		devm_kfree(&pdev->dev, chain);
		chain = chain_tmp;
	}
}

static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
				   struct hns3_enet_ring *ring)
{
	ring->next = group->ring;
	group->ring = ring;
}

static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
{
	struct pci_dev *pdev = priv->ae_handle->pdev;
	struct hns3_enet_tqp_vector *tqp_vector;
	int num_vectors = priv->vector_num;
	int numa_node;
	int vector_i;

	numa_node = dev_to_node(&pdev->dev);

	for (vector_i = 0; vector_i < num_vectors; vector_i++) {
		tqp_vector = &priv->tqp_vector[vector_i];
		cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
				&tqp_vector->affinity_mask);
	}
}
static void hns3_rx_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct hns3_enet_ring_group *group = container_of(dim,
		struct hns3_enet_ring_group, dim);
	struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector;
	struct dim_cq_moder cur_moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

	hns3_set_vector_coalesce_rx_gl(group->ring->tqp_vector, cur_moder.usec);
	tqp_vector->rx_group.coal.int_gl = cur_moder.usec;

	if (cur_moder.pkts < tqp_vector->rx_group.coal.int_ql_max) {
		hns3_set_vector_coalesce_rx_ql(tqp_vector, cur_moder.pkts);
		tqp_vector->rx_group.coal.int_ql = cur_moder.pkts;
	}

	dim->state = DIM_START_MEASURE;
}

static void hns3_tx_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct hns3_enet_ring_group *group = container_of(dim,
		struct hns3_enet_ring_group, dim);
	struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector;
	struct dim_cq_moder cur_moder =
		net_dim_get_tx_moderation(dim->mode, dim->profile_ix);

	hns3_set_vector_coalesce_tx_gl(tqp_vector, cur_moder.usec);
	tqp_vector->tx_group.coal.int_gl = cur_moder.usec;

	if (cur_moder.pkts < tqp_vector->tx_group.coal.int_ql_max) {
		hns3_set_vector_coalesce_tx_ql(tqp_vector, cur_moder.pkts);
		tqp_vector->tx_group.coal.int_ql = cur_moder.pkts;
	}

	dim->state = DIM_START_MEASURE;
}

static void hns3_nic_init_dim(struct hns3_enet_tqp_vector *tqp_vector)
{
	INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work);
	INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work);
}
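
/* Distribute the tx/rx rings over the allocated TQP vectors, map each
 * ring chain to its vector in hardware and register the NAPI handlers.
 */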
static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	int ret;
	int i;

	hns3_nic_set_cpumask(priv);

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		hns3_vector_coalesce_init_hw(tqp_vector, priv);
		tqp_vector->num_tqps = 0;
		hns3_nic_init_dim(tqp_vector);
	}

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		u16 vector_i = i % priv->vector_num;
		u16 tqp_num = h->kinfo.num_tqps;

		tqp_vector = &priv->tqp_vector[vector_i];

		hns3_add_ring_to_group(&tqp_vector->tx_group,
				       &priv->ring[i]);

		hns3_add_ring_to_group(&tqp_vector->rx_group,
				       &priv->ring[i + tqp_num]);

		priv->ring[i].tqp_vector = tqp_vector;
		priv->ring[i + tqp_num].tqp_vector = tqp_vector;
		tqp_vector->num_tqps++;
	}

	for (i = 0; i < priv->vector_num; i++) {
		struct hnae3_ring_chain_node *vector_ring_chain;

		tqp_vector = &priv->tqp_vector[i];

		tqp_vector->rx_group.total_bytes = 0;
		tqp_vector->rx_group.total_packets = 0;
		tqp_vector->tx_group.total_bytes = 0;
		tqp_vector->tx_group.total_packets = 0;
		tqp_vector->handle = h;

		vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
		if (!vector_ring_chain) {
			ret = -ENOMEM;
			goto map_ring_fail;
		}

		ret = h->ae_algo->ops->map_ring_to_vector(h,
			tqp_vector->vector_irq, vector_ring_chain);

		hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);

		if (ret)
			goto map_ring_fail;

		netif_napi_add(priv->netdev, &tqp_vector->napi,
			       hns3_nic_common_poll);
	}

	return 0;

map_ring_fail:
	while (i--)
		netif_napi_del(&priv->tqp_vector[i].napi);

	return ret;
}
static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
	struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
	struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;

	/* initialize the configuration for interrupt coalescing.
	 * 1. GL (Interrupt Gap Limiter)
	 * 2. RL (Interrupt Rate Limiter)
	 * 3. QL (Interrupt Quantity Limiter)
	 *
	 * Default: enable interrupt coalescing self-adaptive and GL
	 */
	tx_coal->adapt_enable = 1;
	rx_coal->adapt_enable = 1;

	tx_coal->int_gl = HNS3_INT_GL_50K;
	rx_coal->int_gl = HNS3_INT_GL_50K;

	rx_coal->flow_level = HNS3_FLOW_LOW;
	tx_coal->flow_level = HNS3_FLOW_LOW;

	if (ae_dev->dev_specs.int_ql_max) {
		tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
		rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
	}
}
static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hnae3_vector_info *vector;
	struct pci_dev *pdev = h->pdev;
	u16 tqp_num = h->kinfo.num_tqps;
	u16 vector_num;
	int ret = 0;
	u16 i;

	/* RSS size, cpu online and vector_num should be the same */
	/* Should consider 2p/4p later */
	vector_num = min_t(u16, num_online_cpus(), tqp_num);

	vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
			      GFP_KERNEL);
	if (!vector)
		return -ENOMEM;

	/* save the actual available vector number */
	vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);

	priv->vector_num = vector_num;
	priv->tqp_vector = (struct hns3_enet_tqp_vector *)
		devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
			     GFP_KERNEL);
	if (!priv->tqp_vector) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		tqp_vector->idx = i;
		tqp_vector->mask_addr = vector[i].io_addr;
		tqp_vector->vector_irq = vector[i].vector;
		hns3_vector_coalesce_init(tqp_vector, priv);
	}

out:
	devm_kfree(&pdev->dev, vector);
	return ret;
}
static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
{
	group->ring = NULL;
	group->count = 0;
}

static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node *vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
			continue;

		/* Since the mapping can be overwritten, when fail to get the
		 * chain between vector and ring, we should go on to deal with
		 * the remaining options.
		 */
		vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
		if (!vector_ring_chain)
			dev_warn(priv->dev, "failed to get ring chain\n");

		h->ae_algo->ops->unmap_ring_from_vector(h,
			tqp_vector->vector_irq, vector_ring_chain);

		hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);

		hns3_clear_ring_group(&tqp_vector->rx_group);
		hns3_clear_ring_group(&tqp_vector->tx_group);
		netif_napi_del(&priv->tqp_vector[i].napi);
	}
}

static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	for (i = 0; i < priv->vector_num; i++) {
		struct hns3_enet_tqp_vector *tqp_vector;

		tqp_vector = &priv->tqp_vector[i];
		ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
		if (ret)
			return;
	}

	devm_kfree(&pdev->dev, priv->tqp_vector);
}
static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
			      unsigned int ring_type)
{
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct hns3_enet_ring *ring;
	int desc_num;

	if (ring_type == HNAE3_RING_TYPE_TX) {
		ring = &priv->ring[q->tqp_index];
		desc_num = priv->ae_handle->kinfo.num_tx_desc;
		ring->queue_index = q->tqp_index;
		ring->tx_copybreak = priv->tx_copybreak;
		ring->last_to_use = 0;
	} else {
		ring = &priv->ring[q->tqp_index + queue_num];
		desc_num = priv->ae_handle->kinfo.num_rx_desc;
		ring->queue_index = q->tqp_index;
		ring->rx_copybreak = priv->rx_copybreak;
	}

	hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);

	ring->tqp = q;
	ring->desc = NULL;
	ring->desc_cb = NULL;
	ring->dev = priv->dev;
	ring->desc_dma_addr = 0;
	ring->buf_size = q->buf_size;
	ring->desc_num = desc_num;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;
}
static void hns3_queue_to_ring(struct hnae3_queue *tqp,
			       struct hns3_nic_priv *priv)
{
	hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
	hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
}

static int hns3_get_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i;

	priv->ring = devm_kzalloc(&pdev->dev,
				  array3_size(h->kinfo.num_tqps,
					      sizeof(*priv->ring), 2),
				  GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;

	for (i = 0; i < h->kinfo.num_tqps; i++)
		hns3_queue_to_ring(h->kinfo.tqp[i], priv);

	return 0;
}

static void hns3_put_ring_config(struct hns3_nic_priv *priv)
{
	if (!priv->ring)
		return;

	devm_kfree(priv->dev, priv->ring);
	priv->ring = NULL;
}
static void hns3_alloc_page_pool(struct hns3_enet_ring *ring)
{
	struct page_pool_params pp_params = {
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG |
				PP_FLAG_DMA_SYNC_DEV,
		.order = hns3_page_order(ring),
		.pool_size = ring->desc_num * hns3_buf_size(ring) /
				(PAGE_SIZE << hns3_page_order(ring)),
		.nid = dev_to_node(ring_to_dev(ring)),
		.dev = ring_to_dev(ring),
		.dma_dir = DMA_FROM_DEVICE,
		.offset = 0,
		.max_len = PAGE_SIZE << hns3_page_order(ring),
	};

	ring->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(ring->page_pool)) {
		dev_warn(ring_to_dev(ring), "page pool creation failed: %ld\n",
			 PTR_ERR(ring->page_pool));
		ring->page_pool = NULL;
	}
}

static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
				     sizeof(ring->desc_cb[0]), GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hns3_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (!HNAE3_IS_TX_RING(ring)) {
		if (page_pool_enabled)
			hns3_alloc_page_pool(ring);

		ret = hns3_alloc_ring_buffers(ring);
		if (ret)
			goto out_with_desc;
	} else {
		hns3_init_tx_spare_buffer(ring);
	}

	return 0;

out_with_desc:
	hns3_free_desc(ring);
out_with_desc_cb:
	devm_kfree(ring_to_dev(ring), ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}
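
/* Release the descriptors, control blocks, pending skb, tx spare buffer
 * and page pool attached to a ring.
 */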
void hns3_fini_ring(struct hns3_enet_ring *ring)
{
	hns3_free_desc(ring);
	devm_kfree(ring_to_dev(ring), ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->last_to_use = 0;
	ring->pending_buf = 0;
	if (!HNAE3_IS_TX_RING(ring) && ring->skb) {
		dev_kfree_skb_any(ring->skb);
		ring->skb = NULL;
	} else if (HNAE3_IS_TX_RING(ring) && ring->tx_spare) {
		struct hns3_tx_spare *tx_spare = ring->tx_spare;

		dma_unmap_page(ring_to_dev(ring), tx_spare->dma, tx_spare->len,
			       DMA_TO_DEVICE);
		free_pages((unsigned long)tx_spare->buf,
			   get_order(tx_spare->len));
		devm_kfree(ring_to_dev(ring), tx_spare);
		ring->tx_spare = NULL;
	}

	if (!HNAE3_IS_TX_RING(ring) && ring->page_pool) {
		page_pool_destroy(ring->page_pool);
		ring->page_pool = NULL;
	}
}
static int hns3_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS3_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
	}

	return bd_size_type;
}
static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hnae3_queue *q = ring->tqp;

	if (!HNAE3_IS_TX_RING(ring)) {
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	} else {
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	}
}

static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
{
	struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	int i, j;

	for (i = 0; i < tc_info->num_tc; i++) {
		for (j = 0; j < tc_info->tqp_count[i]; j++) {
			struct hnae3_queue *q;

			q = priv->ring[tc_info->tqp_offset[i] + j].tqp;
			hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i);
		}
	}
}
int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int ring_num = h->kinfo.num_tqps * 2;
	int i, j;
	int ret;

	for (i = 0; i < ring_num; i++) {
		ret = hns3_alloc_ring_memory(&priv->ring[i]);
		if (ret) {
			dev_err(priv->dev,
				"Alloc ring memory fail! ret=%d\n", ret);
			goto out_when_alloc_ring_memory;
		}

		u64_stats_init(&priv->ring[i].syncp);
	}

	return 0;

out_when_alloc_ring_memory:
	for (j = i - 1; j >= 0; j--)
		hns3_fini_ring(&priv->ring[j]);

	return -ENOMEM;
}

static void hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		hns3_fini_ring(&priv->ring[i]);
		hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]);
	}
}
/* Set mac addr if it is configured. or leave it to the AE driver */
static int hns3_init_mac_addr(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hnae3_handle *h = priv->ae_handle;
	u8 mac_addr_temp[ETH_ALEN];
	int ret = 0;

	if (h->ae_algo->ops->get_mac_addr)
		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);

	/* Check if the MAC address is valid, if not get a random one */
	if (!is_valid_ether_addr(mac_addr_temp)) {
		eth_hw_addr_random(netdev);
		hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr);
		dev_warn(priv->dev, "using random MAC address %s\n",
			 format_mac_addr);
	} else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
		eth_hw_addr_set(netdev, mac_addr_temp);
		ether_addr_copy(netdev->perm_addr, mac_addr_temp);
	} else {
		return 0;
	}

	if (h->ae_algo->ops->set_mac_addr)
		ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);

	return ret;
}
static int hns3_init_phy(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = 0;

	if (h->ae_algo->ops->mac_connect_phy)
		ret = h->ae_algo->ops->mac_connect_phy(h);

	return ret;
}

static void hns3_uninit_phy(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->mac_disconnect_phy)
		h->ae_algo->ops->mac_disconnect_phy(h);
}

static int hns3_client_start(struct hnae3_handle *handle)
{
	if (!handle->ae_algo->ops->client_start)
		return 0;

	return handle->ae_algo->ops->client_start(handle);
}

static void hns3_client_stop(struct hnae3_handle *handle)
{
	if (!handle->ae_algo->ops->client_stop)
		return;

	handle->ae_algo->ops->client_stop(handle);
}
static void hns3_info_show(struct hns3_nic_priv *priv)
{
	struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];

	hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr);
	dev_info(priv->dev, "MAC address: %s\n", format_mac_addr);
	dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
	dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
	dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
	dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
	dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
	dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
	dev_info(priv->dev, "Total number of enabled TCs: %u\n",
		 kinfo->tc_info.num_tc);
	dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu);
}
static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
				    enum dim_cq_period_mode mode, bool is_tx)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
	struct hnae3_handle *handle = priv->ae_handle;
	int i;

	if (is_tx) {
		priv->tx_cqe_mode = mode;

		for (i = 0; i < priv->vector_num; i++)
			priv->tqp_vector[i].tx_group.dim.mode = mode;
	} else {
		priv->rx_cqe_mode = mode;

		for (i = 0; i < priv->vector_num; i++)
			priv->tqp_vector[i].rx_group.dim.mode = mode;
	}

	if (hnae3_ae_dev_cq_supported(ae_dev)) {
		u32 new_mode;
		u64 reg;

		new_mode = (mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE) ?
			HNS3_CQ_MODE_CQE : HNS3_CQ_MODE_EQE;
		reg = is_tx ? HNS3_GL1_CQ_MODE_REG : HNS3_GL0_CQ_MODE_REG;

		writel(new_mode, handle->kinfo.io_base + reg);
	}
}

void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
			      enum dim_cq_period_mode tx_mode,
			      enum dim_cq_period_mode rx_mode)
{
	hns3_set_cq_period_mode(priv, tx_mode, true);
	hns3_set_cq_period_mode(priv, rx_mode, false);
}
static void hns3_state_init(struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	set_bit(HNS3_NIC_STATE_INITED, &priv->state);

	if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
		set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);

	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);

	if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
		set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state);

	if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev))
		set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
}

static void hns3_state_uninit(struct hnae3_handle *handle)
{
	struct hns3_nic_priv *priv = handle->priv;

	clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
}
static int hns3_client_init(struct hnae3_handle *handle)
{
	struct pci_dev *pdev = handle->pdev;
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	u16 alloc_tqps, max_rss_size;
	struct hns3_nic_priv *priv;
	struct net_device *netdev;
	int ret;

	handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
						    &max_rss_size);
	netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
	if (!netdev)
		return -ENOMEM;

	priv = netdev_priv(netdev);
	priv->dev = &pdev->dev;
	priv->netdev = netdev;
	priv->ae_handle = handle;
	priv->tx_timeout_count = 0;
	priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
	set_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);

	handle->kinfo.netdev = netdev;
	handle->priv = (void *)priv;

	hns3_init_mac_addr(netdev);

	hns3_set_default_feature(netdev);

	netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->netdev_ops = &hns3_nic_netdev_ops;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	hns3_ethtool_set_ops(netdev);

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_get_ring_cfg;
	}

	hns3_nic_init_coal_cfg(priv);

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_alloc_vector_data;
	}

	ret = hns3_nic_init_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_vector_data;
	}

	ret = hns3_init_all_ring(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring;
	}

	hns3_cq_period_mode_init(priv, DIM_CQ_PERIOD_MODE_START_FROM_EQE,
				 DIM_CQ_PERIOD_MODE_START_FROM_EQE);

	ret = hns3_init_phy(netdev);
	if (ret)
		goto out_init_phy;

	/* the device can work without cpu rmap, only aRFS needs it */
	ret = hns3_set_rx_cpu_rmap(netdev);
	if (ret)
		dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);

	ret = hns3_nic_init_irq(priv);
	if (ret) {
		dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
		hns3_free_rx_cpu_rmap(netdev);
		goto out_init_irq_fail;
	}

	ret = hns3_client_start(handle);
	if (ret) {
		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
		goto out_client_start;
	}

	hns3_dcbnl_setup(handle);

	ret = hns3_dbg_init(handle);
	if (ret) {
		dev_err(priv->dev, "failed to init debugfs, ret = %d\n",
			ret);
		goto out_client_start;
	}

	netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size);

	hns3_state_init(handle);

	ret = register_netdev(netdev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_netdev_fail;
	}

	if (netif_msg_drv(handle))
		hns3_info_show(priv);

	return ret;

out_reg_netdev_fail:
	hns3_state_uninit(handle);
	hns3_dbg_uninit(handle);
	hns3_client_stop(handle);
out_client_start:
	hns3_free_rx_cpu_rmap(netdev);
	hns3_nic_uninit_irq(priv);
out_init_irq_fail:
	hns3_uninit_phy(netdev);
out_init_phy:
	hns3_uninit_all_ring(priv);
out_init_ring:
	hns3_nic_uninit_vector_data(priv);
out_init_vector_data:
	hns3_nic_dealloc_vector_data(priv);
out_alloc_vector_data:
	priv->ring = NULL;
out_get_ring_cfg:
	priv->ae_handle = NULL;
	free_netdev(netdev);
	return ret;
}
static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if (netdev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(netdev);

	hns3_client_stop(handle);

	hns3_uninit_phy(netdev);

	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
		netdev_warn(netdev, "already uninitialized\n");
		goto out_netdev_free;
	}

	hns3_free_rx_cpu_rmap(netdev);

	hns3_nic_uninit_irq(priv);

	hns3_clear_all_ring(handle, true);

	hns3_nic_uninit_vector_data(priv);

	hns3_nic_dealloc_vector_data(priv);

	hns3_uninit_all_ring(priv);

	hns3_put_ring_config(priv);

out_netdev_free:
	hns3_dbg_uninit(handle);
	free_netdev(netdev);
}
static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
{
	struct net_device *netdev = handle->kinfo.netdev;

	if (!netdev)
		return;

	if (linkup) {
		netif_tx_wake_all_queues(netdev);
		netif_carrier_on(netdev);
		if (netif_msg_link(handle))
			netdev_info(netdev, "link up\n");
	} else {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		if (netif_msg_link(handle))
			netdev_info(netdev, "link down\n");
	}
}
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
	while (ring->next_to_clean != ring->next_to_use) {
		ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
		hns3_free_buffer_detach(ring, ring->next_to_clean, 0);
		ring_ptr_move_fw(ring, next_to_clean);
	}

	ring->pending_buf = 0;
}

static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
{
	struct hns3_desc_cb res_cbs;
	int ret;

	while (ring->next_to_use != ring->next_to_clean) {
		/* When a buffer is not reused, it's memory has been
		 * freed in hns3_handle_rx_bd or will be freed by
		 * stack, so we need to replace the buffer here.
		 */
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
			if (ret) {
				hns3_ring_stats_update(ring, sw_err_cnt);
				/* if alloc new buffer fail, exit directly
				 * and reclear in up flow.
				 */
				netdev_warn(ring_to_netdev(ring),
					    "reserve buffer map failed, ret = %d\n",
					    ret);
				return ret;
			}
			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}
		ring_ptr_move_fw(ring, next_to_use);
	}

	/* Free the pending skb in rx ring */
	if (ring->skb) {
		dev_kfree_skb_any(ring->skb);
		ring->skb = NULL;
		ring->pending_buf = 0;
	}

	return 0;
}

static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
{
	while (ring->next_to_use != ring->next_to_clean) {
		/* When a buffer is not reused, it's memory has been
		 * freed in hns3_handle_rx_bd or will be freed by
		 * stack, so only need to unmap the buffer here.
		 */
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			hns3_unmap_buffer(ring,
					  &ring->desc_cb[ring->next_to_use]);
			ring->desc_cb[ring->next_to_use].dma = 0;
		}

		ring_ptr_move_fw(ring, next_to_use);
	}
}

static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		struct hns3_enet_ring *ring;

		ring = &priv->ring[i];
		hns3_clear_tx_ring(ring);

		ring = &priv->ring[i + h->kinfo.num_tqps];
		/* Continue to clear other rings even if clearing some
		 * rings failed.
		 */
		if (force)
			hns3_force_clear_rx_ring(ring);
		else
			hns3_clear_rx_ring(ring);
	}
}
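
/* Re-initialize every ring after a queue reset: reprogram the ring
 * registers, drop stale tx buffers and mark all rx descriptors reusable.
 */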
int hns3_nic_reset_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *rx_ring;
	int i, j;
	int ret;

	ret = h->ae_algo->ops->reset_queue(h);
	if (ret)
		return ret;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		hns3_init_ring_hw(&priv->ring[i]);

		/* We need to clear tx ring here because self test will
		 * use the ring and will not run down before up
		 */
		hns3_clear_tx_ring(&priv->ring[i]);
		priv->ring[i].next_to_clean = 0;
		priv->ring[i].next_to_use = 0;
		priv->ring[i].last_to_use = 0;

		rx_ring = &priv->ring[i + h->kinfo.num_tqps];
		hns3_init_ring_hw(rx_ring);
		ret = hns3_clear_rx_ring(rx_ring);
		if (ret)
			return ret;

		/* We can not know the hardware head and tail when this
		 * function is called in reset flow, so we reuse all desc.
		 */
		for (j = 0; j < rx_ring->desc_num; j++)
			hns3_reuse_buffer(rx_ring, j);

		rx_ring->next_to_clean = 0;
		rx_ring->next_to_use = 0;
	}

	hns3_init_tx_ring_tc(priv);

	return 0;
}
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
		return 0;

	if (!netif_running(ndev))
		return 0;

	return hns3_nic_net_stop(ndev);
}

static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
	int ret = 0;

	if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
		netdev_err(kinfo->netdev, "device is not initialized yet\n");
		return -EFAULT;
	}

	clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);

	if (netif_running(kinfo->netdev)) {
		ret = hns3_nic_net_open(kinfo->netdev);
		if (ret) {
			set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
			netdev_err(kinfo->netdev,
				   "net up fail, ret=%d!\n", ret);
			return ret;
		}
	}

	return ret;
}
static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret)
		return ret;

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret)
		goto err_put_ring;

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		goto err_dealloc_vector;

	ret = hns3_init_all_ring(priv);
	if (ret)
		goto err_uninit_vector;

	hns3_cq_period_mode_init(priv, priv->tx_cqe_mode, priv->rx_cqe_mode);

	/* the device can work without cpu rmap, only aRFS needs it */
	ret = hns3_set_rx_cpu_rmap(netdev);
	if (ret)
		dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);

	ret = hns3_nic_init_irq(priv);
	if (ret) {
		dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
		hns3_free_rx_cpu_rmap(netdev);
		goto err_init_irq_fail;
	}

	if (!hns3_is_phys_func(handle->pdev))
		hns3_init_mac_addr(netdev);

	ret = hns3_client_start(handle);
	if (ret) {
		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
		goto err_client_start_fail;
	}

	set_bit(HNS3_NIC_STATE_INITED, &priv->state);

	return ret;

err_client_start_fail:
	hns3_free_rx_cpu_rmap(netdev);
	hns3_nic_uninit_irq(priv);
err_init_irq_fail:
	hns3_uninit_all_ring(priv);
err_uninit_vector:
	hns3_nic_uninit_vector_data(priv);
err_dealloc_vector:
	hns3_nic_dealloc_vector_data(priv);
err_put_ring:
	hns3_put_ring_config(priv);

	return ret;
}
static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
		netdev_warn(netdev, "already uninitialized\n");
		return 0;
	}

	hns3_free_rx_cpu_rmap(netdev);
	hns3_nic_uninit_irq(priv);
	hns3_clear_all_ring(handle, true);
	hns3_reset_tx_queue(priv->ae_handle);

	hns3_nic_uninit_vector_data(priv);

	hns3_nic_dealloc_vector_data(priv);

	hns3_uninit_all_ring(priv);

	hns3_put_ring_config(priv);

	return 0;
}
int hns3_reset_notify(struct hnae3_handle *handle,
		      enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_UP_CLIENT:
		ret = hns3_reset_notify_up_enet(handle);
		break;
	case HNAE3_DOWN_CLIENT:
		ret = hns3_reset_notify_down_enet(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns3_reset_notify_init_enet(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns3_reset_notify_uninit_enet(handle);
		break;
	default:
		break;
	}

	return ret;
}

static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num,
				bool rxfh_configured)
{
	int ret;

	ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num,
						 rxfh_configured);
	if (ret) {
		dev_err(&handle->pdev->dev,
			"Change tqp num(%u) fail.\n", new_tqp_num);
		return ret;
	}

	ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	ret = hns3_reset_notify(handle, HNAE3_UP_CLIENT);
	if (ret)
		hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT);

	return ret;
}
int hns3_set_channels(struct net_device *netdev,
		      struct ethtool_channels *ch)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	bool rxfh_configured = netif_is_rxfh_configured(netdev);
	u32 new_tqp_num = ch->combined_count;
	u16 org_tqp_num;
	int ret;

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	if (ch->rx_count || ch->tx_count)
		return -EOPNOTSUPP;

	if (kinfo->tc_info.mqprio_active) {
		dev_err(&netdev->dev,
			"it's not allowed to set channels via ethtool when MQPRIO mode is on\n");
		return -EINVAL;
	}

	if (new_tqp_num > hns3_get_max_available_channels(h) ||
	    new_tqp_num < 1) {
		dev_err(&netdev->dev,
			"Change tqps fail, the tqp range is from 1 to %u",
			hns3_get_max_available_channels(h));
		return -EINVAL;
	}

	if (kinfo->rss_size == new_tqp_num)
		return 0;

	netif_dbg(h, drv, netdev,
		  "set channels: tqp_num=%u, rxfh=%d\n",
		  new_tqp_num, rxfh_configured);

	ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	org_tqp_num = h->kinfo.num_tqps;
	ret = hns3_change_channels(h, new_tqp_num, rxfh_configured);
	if (ret) {
		int ret1;

		netdev_warn(netdev,
			    "Change channels fail, revert to old value\n");
		ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured);
		if (ret1) {
			netdev_err(netdev,
				   "revert to old channel fail\n");
			return ret1;
		}

		return ret;
	}

	return 0;
}
void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	if (!if_running)
		return;

	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return;

	netif_carrier_off(ndev);
	netif_tx_disable(ndev);

	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_disable(&priv->tqp_vector[i]);

	for (i = 0; i < h->kinfo.num_tqps; i++)
		hns3_tqp_disable(h->kinfo.tqp[i]);

	/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
	 * during reset process, because driver may not be able
	 * to disable the ring through firmware when downing the netdev.
	 */
	if (!hns3_nic_resetting(ndev))
		hns3_nic_reset_all_ring(priv->ae_handle);

	hns3_reset_tx_queue(priv->ae_handle);
}

void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	if (!if_running)
		return;

	if (hns3_nic_resetting(ndev))
		return;

	if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return;

	if (hns3_nic_reset_all_ring(priv->ae_handle))
		return;

	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_enable(&priv->tqp_vector[i]);

	for (i = 0; i < h->kinfo.num_tqps; i++)
		hns3_tqp_enable(h->kinfo.tqp[i]);

	netif_tx_wake_all_queues(ndev);

	if (h->ae_algo->ops->get_status(h))
		netif_carrier_on(ndev);
}
static const struct hns3_hw_error_info hns3_hw_err[] = {
	{ .type = HNAE3_PPU_POISON_ERROR,
	  .msg = "PPU poison" },
	{ .type = HNAE3_CMDQ_ECC_ERROR,
	  .msg = "IMP CMDQ error" },
	{ .type = HNAE3_IMP_RD_POISON_ERROR,
	  .msg = "IMP RD poison" },
	{ .type = HNAE3_ROCEE_AXI_RESP_ERROR,
	  .msg = "ROCEE AXI RESP error" },
};

static void hns3_process_hw_error(struct hnae3_handle *handle,
				  enum hnae3_hw_error_type type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) {
		if (hns3_hw_err[i].type == type) {
			dev_err(&handle->pdev->dev, "Detected %s!\n",
				hns3_hw_err[i].msg);
			break;
		}
	}
}

static const struct hnae3_client_ops client_ops = {
	.init_instance = hns3_client_init,
	.uninit_instance = hns3_client_uninit,
	.link_status_change = hns3_link_status_change,
	.reset_notify = hns3_reset_notify,
	.process_hw_error = hns3_process_hw_error,
};
/* hns3_init_module - Driver registration routine
 * hns3_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init hns3_init_module(void)
{
	int ret;

	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);

	client.type = HNAE3_CLIENT_KNIC;
	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s",
		 hns3_driver_name);

	client.ops = &client_ops;

	INIT_LIST_HEAD(&client.node);

	hns3_dbg_register_debugfs(hns3_driver_name);

	ret = hnae3_register_client(&client);
	if (ret)
		goto err_reg_client;

	ret = pci_register_driver(&hns3_driver);
	if (ret)
		goto err_reg_driver;

	return ret;

err_reg_driver:
	hnae3_unregister_client(&client);
err_reg_client:
	hns3_dbg_unregister_debugfs();
	return ret;
}
module_init(hns3_init_module);

/* hns3_exit_module - Driver exit cleanup routine
 * hns3_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit hns3_exit_module(void)
{
	pci_unregister_driver(&hns3_driver);
	hnae3_unregister_client(&client);
	hns3_dbg_unregister_debugfs();
}
module_exit(hns3_exit_module);

MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");