// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/if_vlan.h>
#include <linux/irq.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/vxlan.h>
#include <net/geneve.h>

#include "hns3_enet.h"
/* All hns3 tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "hns3_trace.h"

#define hns3_set_field(origin, shift, val)	((origin) |= (val) << (shift))
#define hns3_tx_bd_count(S)	DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)

#define hns3_rl_err(fmt, ...)						\
	do {								\
		if (net_ratelimit())					\
			netdev_err(fmt, ##__VA_ARGS__);			\
	} while (0)
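
/* Quick usage notes for the helper macros above (illustrative, not from the
 * original source): hns3_set_field() only ORs the shifted value in, so
 * callers are expected to start from a zeroed field; hns3_tx_bd_count(len)
 * rounds len up to the number of HNS3_MAX_BD_SIZE-sized BDs needed, e.g. a
 * buffer slightly larger than one BD needs two BDs.
 */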
static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);

static const char hns3_driver_name[] = "hns3";
static const char hns3_driver_string[] =
			"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Network interface message level setting");

static unsigned int tx_sgl = 1;
module_param(tx_sgl, uint, 0600);
MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");

static bool page_pool_enabled = true;
module_param(page_pool_enabled, bool, 0400);

#define HNS3_SGL_SIZE(nfrag)	(sizeof(struct scatterlist) * (nfrag) +	\
				 sizeof(struct sg_table))
#define HNS3_MAX_SGL_SIZE	ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \
				      dma_get_cache_alignment())
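
/* Sizing note (sketch, sizes are arch dependent): HNS3_MAX_SGL_SIZE reserves
 * room for one struct sg_table plus HNS3_MAX_TSO_BD_NUM scatterlist entries,
 * rounded up to the DMA cache alignment; this is the amount of tx spare
 * buffer consumed per packet when the dma_map_sg() path is taken.
 */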

#define DEFAULT_MSG_LEVEL	(NETIF_MSG_PROBE | NETIF_MSG_LINK | \
				 NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)

#define HNS3_INNER_VLAN_TAG	1
#define HNS3_OUTER_VLAN_TAG	2

#define HNS3_MIN_TX_LEN		33U
#define HNS3_MIN_TUN_PKT_LEN	65U

/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, },
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);

#define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t, h) \
	{	ptype, \
		l, \
		CHECKSUM_##s, \
		HNS3_L3_TYPE_##t, \
		1, \
		h}

#define HNS3_RX_PTYPE_UNUSED_ENTRY(ptype) \
		{ ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0, \
		  PKT_HASH_TYPE_NONE }

static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
	HNS3_RX_PTYPE_UNUSED_ENTRY(0),
	HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_UNUSED_ENTRY(9),
	HNS3_RX_PTYPE_UNUSED_ENTRY(10),
	HNS3_RX_PTYPE_UNUSED_ENTRY(11),
	HNS3_RX_PTYPE_UNUSED_ENTRY(12),
	HNS3_RX_PTYPE_UNUSED_ENTRY(13),
	HNS3_RX_PTYPE_UNUSED_ENTRY(14),
	HNS3_RX_PTYPE_UNUSED_ENTRY(15),
	HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_UNUSED_ENTRY(26),
	HNS3_RX_PTYPE_UNUSED_ENTRY(27),
	HNS3_RX_PTYPE_UNUSED_ENTRY(28),
	HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_UNUSED_ENTRY(38),
	HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_UNUSED_ENTRY(46),
	HNS3_RX_PTYPE_UNUSED_ENTRY(47),
	HNS3_RX_PTYPE_UNUSED_ENTRY(48),
	HNS3_RX_PTYPE_UNUSED_ENTRY(49),
	HNS3_RX_PTYPE_UNUSED_ENTRY(50),
	HNS3_RX_PTYPE_UNUSED_ENTRY(51),
	HNS3_RX_PTYPE_UNUSED_ENTRY(52),
	HNS3_RX_PTYPE_UNUSED_ENTRY(53),
	HNS3_RX_PTYPE_UNUSED_ENTRY(54),
	HNS3_RX_PTYPE_UNUSED_ENTRY(55),
	HNS3_RX_PTYPE_UNUSED_ENTRY(56),
	HNS3_RX_PTYPE_UNUSED_ENTRY(57),
	HNS3_RX_PTYPE_UNUSED_ENTRY(58),
	HNS3_RX_PTYPE_UNUSED_ENTRY(59),
	HNS3_RX_PTYPE_UNUSED_ENTRY(60),
	HNS3_RX_PTYPE_UNUSED_ENTRY(61),
	HNS3_RX_PTYPE_UNUSED_ENTRY(62),
	HNS3_RX_PTYPE_UNUSED_ENTRY(63),
	HNS3_RX_PTYPE_UNUSED_ENTRY(64),
	HNS3_RX_PTYPE_UNUSED_ENTRY(65),
	HNS3_RX_PTYPE_UNUSED_ENTRY(66),
	HNS3_RX_PTYPE_UNUSED_ENTRY(67),
	HNS3_RX_PTYPE_UNUSED_ENTRY(68),
	HNS3_RX_PTYPE_UNUSED_ENTRY(69),
	HNS3_RX_PTYPE_UNUSED_ENTRY(70),
	HNS3_RX_PTYPE_UNUSED_ENTRY(71),
	HNS3_RX_PTYPE_UNUSED_ENTRY(72),
	HNS3_RX_PTYPE_UNUSED_ENTRY(73),
	HNS3_RX_PTYPE_UNUSED_ENTRY(74),
	HNS3_RX_PTYPE_UNUSED_ENTRY(75),
	HNS3_RX_PTYPE_UNUSED_ENTRY(76),
	HNS3_RX_PTYPE_UNUSED_ENTRY(77),
	HNS3_RX_PTYPE_UNUSED_ENTRY(78),
	HNS3_RX_PTYPE_UNUSED_ENTRY(79),
	HNS3_RX_PTYPE_UNUSED_ENTRY(80),
	HNS3_RX_PTYPE_UNUSED_ENTRY(81),
	HNS3_RX_PTYPE_UNUSED_ENTRY(82),
	HNS3_RX_PTYPE_UNUSED_ENTRY(83),
	HNS3_RX_PTYPE_UNUSED_ENTRY(84),
	HNS3_RX_PTYPE_UNUSED_ENTRY(85),
	HNS3_RX_PTYPE_UNUSED_ENTRY(86),
	HNS3_RX_PTYPE_UNUSED_ENTRY(87),
	HNS3_RX_PTYPE_UNUSED_ENTRY(88),
	HNS3_RX_PTYPE_UNUSED_ENTRY(89),
	HNS3_RX_PTYPE_UNUSED_ENTRY(90),
	HNS3_RX_PTYPE_UNUSED_ENTRY(91),
	HNS3_RX_PTYPE_UNUSED_ENTRY(92),
	HNS3_RX_PTYPE_UNUSED_ENTRY(93),
	HNS3_RX_PTYPE_UNUSED_ENTRY(94),
	HNS3_RX_PTYPE_UNUSED_ENTRY(95),
	HNS3_RX_PTYPE_UNUSED_ENTRY(96),
	HNS3_RX_PTYPE_UNUSED_ENTRY(97),
	HNS3_RX_PTYPE_UNUSED_ENTRY(98),
	HNS3_RX_PTYPE_UNUSED_ENTRY(99),
	HNS3_RX_PTYPE_UNUSED_ENTRY(100),
	HNS3_RX_PTYPE_UNUSED_ENTRY(101),
	HNS3_RX_PTYPE_UNUSED_ENTRY(102),
	HNS3_RX_PTYPE_UNUSED_ENTRY(103),
	HNS3_RX_PTYPE_UNUSED_ENTRY(104),
	HNS3_RX_PTYPE_UNUSED_ENTRY(105),
	HNS3_RX_PTYPE_UNUSED_ENTRY(106),
	HNS3_RX_PTYPE_UNUSED_ENTRY(107),
	HNS3_RX_PTYPE_UNUSED_ENTRY(108),
	HNS3_RX_PTYPE_UNUSED_ENTRY(109),
	HNS3_RX_PTYPE_UNUSED_ENTRY(110),
	HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_UNUSED_ENTRY(120),
	HNS3_RX_PTYPE_UNUSED_ENTRY(121),
	HNS3_RX_PTYPE_UNUSED_ENTRY(122),
	HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
	HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_UNUSED_ENTRY(132),
	HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
	HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
	HNS3_RX_PTYPE_UNUSED_ENTRY(140),
	HNS3_RX_PTYPE_UNUSED_ENTRY(141),
	HNS3_RX_PTYPE_UNUSED_ENTRY(142),
	HNS3_RX_PTYPE_UNUSED_ENTRY(143),
	HNS3_RX_PTYPE_UNUSED_ENTRY(144),
	HNS3_RX_PTYPE_UNUSED_ENTRY(145),
	HNS3_RX_PTYPE_UNUSED_ENTRY(146),
	HNS3_RX_PTYPE_UNUSED_ENTRY(147),
	HNS3_RX_PTYPE_UNUSED_ENTRY(148),
	HNS3_RX_PTYPE_UNUSED_ENTRY(149),
	HNS3_RX_PTYPE_UNUSED_ENTRY(150),
	HNS3_RX_PTYPE_UNUSED_ENTRY(151),
	HNS3_RX_PTYPE_UNUSED_ENTRY(152),
	HNS3_RX_PTYPE_UNUSED_ENTRY(153),
	HNS3_RX_PTYPE_UNUSED_ENTRY(154),
	HNS3_RX_PTYPE_UNUSED_ENTRY(155),
	HNS3_RX_PTYPE_UNUSED_ENTRY(156),
	HNS3_RX_PTYPE_UNUSED_ENTRY(157),
	HNS3_RX_PTYPE_UNUSED_ENTRY(158),
	HNS3_RX_PTYPE_UNUSED_ENTRY(159),
	HNS3_RX_PTYPE_UNUSED_ENTRY(160),
	HNS3_RX_PTYPE_UNUSED_ENTRY(161),
	HNS3_RX_PTYPE_UNUSED_ENTRY(162),
	HNS3_RX_PTYPE_UNUSED_ENTRY(163),
	HNS3_RX_PTYPE_UNUSED_ENTRY(164),
	HNS3_RX_PTYPE_UNUSED_ENTRY(165),
	HNS3_RX_PTYPE_UNUSED_ENTRY(166),
	HNS3_RX_PTYPE_UNUSED_ENTRY(167),
	HNS3_RX_PTYPE_UNUSED_ENTRY(168),
	HNS3_RX_PTYPE_UNUSED_ENTRY(169),
	HNS3_RX_PTYPE_UNUSED_ENTRY(170),
	HNS3_RX_PTYPE_UNUSED_ENTRY(171),
	HNS3_RX_PTYPE_UNUSED_ENTRY(172),
	HNS3_RX_PTYPE_UNUSED_ENTRY(173),
	HNS3_RX_PTYPE_UNUSED_ENTRY(174),
	HNS3_RX_PTYPE_UNUSED_ENTRY(175),
	HNS3_RX_PTYPE_UNUSED_ENTRY(176),
	HNS3_RX_PTYPE_UNUSED_ENTRY(177),
	HNS3_RX_PTYPE_UNUSED_ENTRY(178),
	HNS3_RX_PTYPE_UNUSED_ENTRY(179),
	HNS3_RX_PTYPE_UNUSED_ENTRY(180),
	HNS3_RX_PTYPE_UNUSED_ENTRY(181),
	HNS3_RX_PTYPE_UNUSED_ENTRY(182),
	HNS3_RX_PTYPE_UNUSED_ENTRY(183),
	HNS3_RX_PTYPE_UNUSED_ENTRY(184),
	HNS3_RX_PTYPE_UNUSED_ENTRY(185),
	HNS3_RX_PTYPE_UNUSED_ENTRY(186),
	HNS3_RX_PTYPE_UNUSED_ENTRY(187),
	HNS3_RX_PTYPE_UNUSED_ENTRY(188),
	HNS3_RX_PTYPE_UNUSED_ENTRY(189),
	HNS3_RX_PTYPE_UNUSED_ENTRY(190),
	HNS3_RX_PTYPE_UNUSED_ENTRY(191),
	HNS3_RX_PTYPE_UNUSED_ENTRY(192),
	HNS3_RX_PTYPE_UNUSED_ENTRY(193),
	HNS3_RX_PTYPE_UNUSED_ENTRY(194),
	HNS3_RX_PTYPE_UNUSED_ENTRY(195),
	HNS3_RX_PTYPE_UNUSED_ENTRY(196),
	HNS3_RX_PTYPE_UNUSED_ENTRY(197),
	HNS3_RX_PTYPE_UNUSED_ENTRY(198),
	HNS3_RX_PTYPE_UNUSED_ENTRY(199),
	HNS3_RX_PTYPE_UNUSED_ENTRY(200),
	HNS3_RX_PTYPE_UNUSED_ENTRY(201),
	HNS3_RX_PTYPE_UNUSED_ENTRY(202),
	HNS3_RX_PTYPE_UNUSED_ENTRY(203),
	HNS3_RX_PTYPE_UNUSED_ENTRY(204),
	HNS3_RX_PTYPE_UNUSED_ENTRY(205),
	HNS3_RX_PTYPE_UNUSED_ENTRY(206),
	HNS3_RX_PTYPE_UNUSED_ENTRY(207),
	HNS3_RX_PTYPE_UNUSED_ENTRY(208),
	HNS3_RX_PTYPE_UNUSED_ENTRY(209),
	HNS3_RX_PTYPE_UNUSED_ENTRY(210),
	HNS3_RX_PTYPE_UNUSED_ENTRY(211),
	HNS3_RX_PTYPE_UNUSED_ENTRY(212),
	HNS3_RX_PTYPE_UNUSED_ENTRY(213),
	HNS3_RX_PTYPE_UNUSED_ENTRY(214),
	HNS3_RX_PTYPE_UNUSED_ENTRY(215),
	HNS3_RX_PTYPE_UNUSED_ENTRY(216),
	HNS3_RX_PTYPE_UNUSED_ENTRY(217),
	HNS3_RX_PTYPE_UNUSED_ENTRY(218),
	HNS3_RX_PTYPE_UNUSED_ENTRY(219),
	HNS3_RX_PTYPE_UNUSED_ENTRY(220),
	HNS3_RX_PTYPE_UNUSED_ENTRY(221),
	HNS3_RX_PTYPE_UNUSED_ENTRY(222),
	HNS3_RX_PTYPE_UNUSED_ENTRY(223),
	HNS3_RX_PTYPE_UNUSED_ENTRY(224),
	HNS3_RX_PTYPE_UNUSED_ENTRY(225),
	HNS3_RX_PTYPE_UNUSED_ENTRY(226),
	HNS3_RX_PTYPE_UNUSED_ENTRY(227),
	HNS3_RX_PTYPE_UNUSED_ENTRY(228),
	HNS3_RX_PTYPE_UNUSED_ENTRY(229),
	HNS3_RX_PTYPE_UNUSED_ENTRY(230),
	HNS3_RX_PTYPE_UNUSED_ENTRY(231),
	HNS3_RX_PTYPE_UNUSED_ENTRY(232),
	HNS3_RX_PTYPE_UNUSED_ENTRY(233),
	HNS3_RX_PTYPE_UNUSED_ENTRY(234),
	HNS3_RX_PTYPE_UNUSED_ENTRY(235),
	HNS3_RX_PTYPE_UNUSED_ENTRY(236),
	HNS3_RX_PTYPE_UNUSED_ENTRY(237),
	HNS3_RX_PTYPE_UNUSED_ENTRY(238),
	HNS3_RX_PTYPE_UNUSED_ENTRY(239),
	HNS3_RX_PTYPE_UNUSED_ENTRY(240),
	HNS3_RX_PTYPE_UNUSED_ENTRY(241),
	HNS3_RX_PTYPE_UNUSED_ENTRY(242),
	HNS3_RX_PTYPE_UNUSED_ENTRY(243),
	HNS3_RX_PTYPE_UNUSED_ENTRY(244),
	HNS3_RX_PTYPE_UNUSED_ENTRY(245),
	HNS3_RX_PTYPE_UNUSED_ENTRY(246),
	HNS3_RX_PTYPE_UNUSED_ENTRY(247),
	HNS3_RX_PTYPE_UNUSED_ENTRY(248),
	HNS3_RX_PTYPE_UNUSED_ENTRY(249),
	HNS3_RX_PTYPE_UNUSED_ENTRY(250),
	HNS3_RX_PTYPE_UNUSED_ENTRY(251),
	HNS3_RX_PTYPE_UNUSED_ENTRY(252),
	HNS3_RX_PTYPE_UNUSED_ENTRY(253),
	HNS3_RX_PTYPE_UNUSED_ENTRY(254),
	HNS3_RX_PTYPE_UNUSED_ENTRY(255),
};

#define HNS3_INVALID_PTYPE \
		ARRAY_SIZE(hns3_rx_ptype_tbl)

static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
	struct hns3_enet_tqp_vector *tqp_vector = vector;

	napi_schedule_irqoff(&tqp_vector->napi);
	tqp_vector->event_cnt++;

	return IRQ_HANDLED;
}

static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	unsigned int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
			continue;

		/* clear the affinity mask */
		irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}

static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int ret;
	int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "TxRx", txrx_int_idx++);
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "Rx", rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "Tx", tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN);
		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name, tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			hns3_nic_uninit_irq(priv);
			return ret;
		}

		irq_set_affinity_hint(tqp_vectors->vector_irq,
				      &tqp_vectors->affinity_mask);

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}

static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}

static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);
	enable_irq(tqp_vector->vector_irq);

	hns3_mask_vector_irq(tqp_vector, 1);
}

static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	hns3_mask_vector_irq(tqp_vector, 0);

	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
	cancel_work_sync(&tqp_vector->rx_group.dim.work);
	cancel_work_sync(&tqp_vector->tx_group.dim.work);
}

void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value)
{
	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

	/* this defines the configuration for RL (Interrupt Rate Limiter).
	 * Rl defines the rate of interrupts, i.e. the number of
	 * interrupts-per-second. GL and RL (Rate Limiter) are two ways to
	 * achieve interrupt coalescing.
	 */
	if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable &&
	    !tqp_vector->rx_group.coal.adapt_enable)
		/* According to the hardware, the range of rl_reg is
		 * 0-59 and the unit is 4.
		 */
		rl_reg |= HNS3_INT_RL_ENABLE_MASK;

	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 new_val;

	if (tqp_vector->rx_group.coal.unit_1us)
		new_val = gl_value | HNS3_INT_GL_1US;
	else
		new_val = hns3_gl_usec_to_reg(gl_value);

	writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}

void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 new_val;

	if (tqp_vector->tx_group.coal.unit_1us)
		new_val = gl_value | HNS3_INT_GL_1US;
	else
		new_val = hns3_gl_usec_to_reg(gl_value);

	writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}

void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value)
{
	writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET);
}

void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value)
{
	writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET);
}

static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hns3_nic_priv *priv)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
	struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
	struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
	struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
	struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;

	tx_coal->adapt_enable = ptx_coal->adapt_enable;
	rx_coal->adapt_enable = prx_coal->adapt_enable;

	tx_coal->int_gl = ptx_coal->int_gl;
	rx_coal->int_gl = prx_coal->int_gl;

	rx_coal->flow_level = prx_coal->flow_level;
	tx_coal->flow_level = ptx_coal->flow_level;

	/* For device version V3 or above, GL can be configured in 1us
	 * units, so use the 1us unit.
	 */
	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
		tx_coal->unit_1us = 1;
		rx_coal->unit_1us = 1;
	}

	if (ae_dev->dev_specs.int_ql_max) {
		tx_coal->ql_enable = 1;
		rx_coal->ql_enable = 1;
		tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
		rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
		tx_coal->int_ql = ptx_coal->int_ql;
		rx_coal->int_ql = prx_coal->int_ql;
	}
}

static void
hns3_vector_coalesce_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
			     struct hns3_nic_priv *priv)
{
	struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
	struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
	struct hnae3_handle *h = priv->ae_handle;

	hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl);
	hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl);
	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);

	if (tx_coal->ql_enable)
		hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql);

	if (rx_coal->ql_enable)
		hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql);
}

static int hns3_nic_set_real_num_queue(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	unsigned int queue_size = kinfo->num_tqps;
	int i, ret;

	if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) {
		netdev_reset_tc(netdev);
	} else {
		ret = netdev_set_num_tc(netdev, tc_info->num_tc);
		if (ret) {
			netdev_err(netdev,
				   "netdev_set_num_tc fail, ret=%d!\n", ret);
			return ret;
		}

		for (i = 0; i < tc_info->num_tc; i++)
			netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i],
					    tc_info->tqp_offset[i]);
	}

	ret = netif_set_real_num_tx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_tx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

u16 hns3_get_max_available_channels(struct hnae3_handle *h)
{
	u16 alloc_tqps, max_rss_size, rss_size;

	h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
	rss_size = alloc_tqps / h->kinfo.tc_info.num_tc;

	return min_t(u16, rss_size, max_rss_size);
}
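
/* Worked example (illustrative numbers): with alloc_tqps = 16, num_tc = 4
 * and max_rss_size = 8, rss_size = 16 / 4 = 4, so at most 4 channels per TC
 * are reported as available.
 */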

static void hns3_tqp_enable(struct hnae3_queue *tqp)
{
	u32 rcb_reg;

	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
	rcb_reg |= BIT(HNS3_RING_EN_B);
	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}

static void hns3_tqp_disable(struct hnae3_queue *tqp)
{
	u32 rcb_reg;

	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
	rcb_reg &= ~BIT(HNS3_RING_EN_B);
	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}

static void hns3_free_rx_cpu_rmap(struct net_device *netdev)
{
#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(netdev->rx_cpu_rmap);
	netdev->rx_cpu_rmap = NULL;
#endif
}

static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_enet_tqp_vector *tqp_vector;
	int i, ret;

	if (!netdev->rx_cpu_rmap) {
		netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num);
		if (!netdev->rx_cpu_rmap)
			return -ENOMEM;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap,
				       tqp_vector->vector_irq);
		if (ret) {
			hns3_free_rx_cpu_rmap(netdev);
			return ret;
		}
	}
#endif
	return 0;
}

static int hns3_nic_net_up(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	ret = hns3_nic_reset_all_ring(h);
	if (ret)
		return ret;

	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	/* enable the vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_enable(&priv->tqp_vector[i]);

	for (j = 0; j < h->kinfo.num_tqps; j++)
		hns3_tqp_enable(h->kinfo.tqp[j]);

	/* start the ae_dev */
	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
	if (ret) {
		set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
		while (j--)
			hns3_tqp_disable(h->kinfo.tqp[j]);

		for (j = i - 1; j >= 0; j--)
			hns3_vector_disable(&priv->tqp_vector[j]);
	}

	return ret;
}

static void hns3_config_xps(struct hns3_nic_priv *priv)
{
	int i;

	for (i = 0; i < priv->vector_num; i++) {
		struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i];
		struct hns3_enet_ring *ring = tqp_vector->tx_group.ring;

		while (ring) {
			int ret;

			ret = netif_set_xps_queue(priv->netdev,
						  &tqp_vector->affinity_mask,
						  ring->tqp->tqp_index);
			if (ret)
				netdev_warn(priv->netdev,
					    "set xps queue failed: %d", ret);

			ring = ring->next;
		}
	}
}

static int hns3_nic_net_open(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo;
	int i, ret;

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
		netdev_warn(netdev, "net open repeatedly!\n");
		return 0;
	}

	netif_carrier_off(netdev);

	ret = hns3_nic_set_real_num_queue(netdev);
	if (ret)
		return ret;

	ret = hns3_nic_net_up(netdev);
	if (ret) {
		netdev_err(netdev, "net up fail, ret=%d!\n", ret);
		return ret;
	}

	kinfo = &h->kinfo;
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]);

	if (h->ae_algo->ops->set_timer_task)
		h->ae_algo->ops->set_timer_task(priv->ae_handle, true);

	hns3_config_xps(priv);

	netif_dbg(h, drv, netdev, "net open\n");

	return 0;
}
*h
)
810 struct net_device
*ndev
= h
->kinfo
.netdev
;
811 struct hns3_nic_priv
*priv
= netdev_priv(ndev
);
812 struct netdev_queue
*dev_queue
;
815 for (i
= 0; i
< h
->kinfo
.num_tqps
; i
++) {
816 dev_queue
= netdev_get_tx_queue(ndev
,
817 priv
->ring
[i
].queue_index
);
818 netdev_tx_reset_queue(dev_queue
);
822 static void hns3_nic_net_down(struct net_device
*netdev
)
824 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
825 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
826 const struct hnae3_ae_ops
*ops
;
829 /* disable vectors */
830 for (i
= 0; i
< priv
->vector_num
; i
++)
831 hns3_vector_disable(&priv
->tqp_vector
[i
]);
834 for (i
= 0; i
< h
->kinfo
.num_tqps
; i
++)
835 hns3_tqp_disable(h
->kinfo
.tqp
[i
]);
838 ops
= priv
->ae_handle
->ae_algo
->ops
;
840 ops
->stop(priv
->ae_handle
);
842 /* delay ring buffer clearing to hns3_reset_notify_uninit_enet
843 * during reset process, because driver may not be able
844 * to disable the ring through firmware when downing the netdev.
846 if (!hns3_nic_resetting(netdev
))
847 hns3_clear_all_ring(priv
->ae_handle
, false);
849 hns3_reset_tx_queue(priv
->ae_handle
);

static int hns3_nic_net_stop(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return 0;

	netif_dbg(h, drv, netdev, "net stop\n");

	if (h->ae_algo->ops->set_timer_task)
		h->ae_algo->ops->set_timer_task(priv->ae_handle, false);

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	hns3_nic_net_down(netdev);

	return 0;
}

static int hns3_nic_uc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_uc_addr)
		return h->ae_algo->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_uc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	/* need to ignore the request of removing the device address, because
	 * we store the device address and other addresses of the uc list
	 * in the function's mac filter list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (h->ae_algo->ops->rm_uc_addr)
		return h->ae_algo->ops->rm_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_mc_addr)
		return h->ae_algo->ops->add_mc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_mc_addr)
		return h->ae_algo->ops->rm_mc_addr(h, addr);

	return 0;
}

static u8 hns3_get_netdev_flags(struct net_device *netdev)
{
	u8 flags = 0;

	if (netdev->flags & IFF_PROMISC)
		flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
	else if (netdev->flags & IFF_ALLMULTI)
		flags = HNAE3_USER_MPE;

	return flags;
}

static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	u8 new_flags;

	new_flags = hns3_get_netdev_flags(netdev);

	__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
	__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync);

	/* In user promiscuous mode, vlan filtering is disabled to
	 * let all packets in.
	 */
	h->netdev_flags = new_flags;
	hns3_request_update_promisc_mode(h);
}

void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	if (ops->request_update_promisc_mode)
		ops->request_update_promisc_mode(handle);
}

static u32 hns3_tx_spare_space(struct hns3_enet_ring *ring)
{
	struct hns3_tx_spare *tx_spare = ring->tx_spare;
	u32 ntc, ntu;

	/* This smp_load_acquire() pairs with smp_store_release() in
	 * hns3_tx_spare_update() called in tx desc cleaning process.
	 */
	ntc = smp_load_acquire(&tx_spare->last_to_clean);
	ntu = tx_spare->next_to_use;

	if (ntc > ntu)
		return ntc - ntu - 1;

	/* The free tx buffer is divided into two parts, so pick the
	 * larger one.
	 */
	return max(ntc, tx_spare->len - ntu) - 1;
}
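
/* Worked example (illustrative numbers): with len = 4096, ntu = 3000 and
 * ntc = 1000, the spare buffer is split into a tail part of 4096 - 3000 =
 * 1096 bytes and a head part of 1000 bytes, so the usable contiguous space
 * reported above is max(1000, 1096) - 1 = 1095 bytes.
 */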

static void hns3_tx_spare_update(struct hns3_enet_ring *ring)
{
	struct hns3_tx_spare *tx_spare = ring->tx_spare;

	if (!tx_spare ||
	    tx_spare->last_to_clean == tx_spare->next_to_clean)
		return;

	/* This smp_store_release() pairs with smp_load_acquire() in
	 * hns3_tx_spare_space() called in xmit process.
	 */
	smp_store_release(&tx_spare->last_to_clean,
			  tx_spare->next_to_clean);
}

static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring,
				   struct sk_buff *skb,
				   u32 space)
{
	u32 len = skb->len <= ring->tx_copybreak ? skb->len :
				skb_headlen(skb);

	if (len > ring->tx_copybreak)
		return false;

	if (ALIGN(len, dma_get_cache_alignment()) > space) {
		hns3_ring_stats_update(ring, tx_spare_full);
		return false;
	}

	return true;
}

static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
				struct sk_buff *skb,
				u32 space)
{
	if (skb->len <= ring->tx_copybreak || !tx_sgl ||
	    (!skb_has_frag_list(skb) &&
	     skb_shinfo(skb)->nr_frags < tx_sgl))
		return false;

	if (space < HNS3_MAX_SGL_SIZE) {
		hns3_ring_stats_update(ring, tx_spare_full);
		return false;
	}

	return true;
}

static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
{
	u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
	struct hns3_tx_spare *tx_spare;
	struct page *page;
	dma_addr_t dma;
	int order;

	if (!alloc_size)
		return;

	order = get_order(alloc_size);
	if (order > MAX_ORDER) {
		if (net_ratelimit())
			dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceed to max order\n");
		return;
	}

	tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare),
				GFP_KERNEL);
	if (!tx_spare) {
		/* The driver still works without the tx spare buffer */
		dev_warn(ring_to_dev(ring), "failed to allocate hns3_tx_spare\n");
		goto devm_kzalloc_error;
	}

	page = alloc_pages_node(dev_to_node(ring_to_dev(ring)),
				GFP_KERNEL, order);
	if (!page) {
		dev_warn(ring_to_dev(ring), "failed to allocate tx spare pages\n");
		goto alloc_pages_error;
	}

	dma = dma_map_page(ring_to_dev(ring), page, 0,
			   PAGE_SIZE << order, DMA_TO_DEVICE);
	if (dma_mapping_error(ring_to_dev(ring), dma)) {
		dev_warn(ring_to_dev(ring), "failed to map pages for tx spare\n");
		goto dma_mapping_error;
	}

	tx_spare->dma = dma;
	tx_spare->buf = page_address(page);
	tx_spare->len = PAGE_SIZE << order;
	ring->tx_spare = tx_spare;
	return;

dma_mapping_error:
	put_page(page);
alloc_pages_error:
	devm_kfree(ring_to_dev(ring), tx_spare);
devm_kzalloc_error:
	ring->tqp->handle->kinfo.tx_spare_buf_size = 0;
}

/* Use hns3_tx_spare_space() to make sure there is enough buffer
 * before calling below function to allocate tx buffer.
 */
static void *hns3_tx_spare_alloc(struct hns3_enet_ring *ring,
				 unsigned int size, dma_addr_t *dma,
				 u32 *cb_len)
{
	struct hns3_tx_spare *tx_spare = ring->tx_spare;
	u32 ntu = tx_spare->next_to_use;

	size = ALIGN(size, dma_get_cache_alignment());
	*cb_len = size;

	/* Tx spare buffer wraps back here because the end of
	 * freed tx buffer is not enough.
	 */
	if (ntu + size > tx_spare->len) {
		*cb_len += (tx_spare->len - ntu);
		ntu = 0;
	}

	tx_spare->next_to_use = ntu + size;
	if (tx_spare->next_to_use == tx_spare->len)
		tx_spare->next_to_use = 0;

	*dma = tx_spare->dma + ntu;

	return tx_spare->buf + ntu;
}
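
/* Worked example for the wrap-around above (illustrative numbers): with
 * len = 4096, ntu = 3900 and an aligned size of 512, the 196 bytes left at
 * the tail are skipped (cb_len becomes 512 + 196 = 708) and the allocation
 * restarts at offset 0, so the caller gets a contiguous 512-byte area while
 * the callback length still covers the skipped tail for later reclaim.
 */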

static void hns3_tx_spare_rollback(struct hns3_enet_ring *ring, u32 len)
{
	struct hns3_tx_spare *tx_spare = ring->tx_spare;

	if (len > tx_spare->next_to_use) {
		len -= tx_spare->next_to_use;
		tx_spare->next_to_use = tx_spare->len - len;
	} else {
		tx_spare->next_to_use -= len;
	}
}

static void hns3_tx_spare_reclaim_cb(struct hns3_enet_ring *ring,
				     struct hns3_desc_cb *cb)
{
	struct hns3_tx_spare *tx_spare = ring->tx_spare;
	u32 ntc = tx_spare->next_to_clean;
	u32 len = cb->length;

	tx_spare->next_to_clean += len;

	if (tx_spare->next_to_clean >= tx_spare->len) {
		tx_spare->next_to_clean -= tx_spare->len;

		if (tx_spare->next_to_clean) {
			ntc = 0;
			len = tx_spare->next_to_clean;
		}
	}

	/* This tx spare buffer is only really reclaimed after calling
	 * hns3_tx_spare_update(), so it is still safe to use the info in
	 * the tx buffer to do the dma sync or sg unmapping after
	 * tx_spare->next_to_clean is moved forward.
	 */
	if (cb->type & (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) {
		dma_addr_t dma = tx_spare->dma + ntc;

		dma_sync_single_for_cpu(ring_to_dev(ring), dma, len,
					DMA_TO_DEVICE);
	} else {
		struct sg_table *sgt = tx_spare->buf + ntc;

		dma_unmap_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents,
			     DMA_TO_DEVICE);
	}
}

static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs,
			u16 *mss, u32 *type_cs_vlan_tso, u32 *send_bytes)
{
	u32 l4_offset, hdr_len;
	union l3_hdr_info l3;
	union l4_hdr_info l4;
	u32 l4_paylen;
	int ret;

	if (!skb_is_gso(skb))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (unlikely(ret < 0))
		return ret;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* Software should clear the IPv4's checksum field when tso is
	 * needed.
	 */
	if (l3.v4->version == 4)
		l3.v4->check = 0;

	/* tunnel packet */
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		/* reset l3&l4 pointers from outer to inner headers */
		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* Software should clear the IPv4's checksum field when
		 * tso is needed.
		 */
		if (l3.v4->version == 4)
			l3.v4->check = 0;
	}

	/* normal or tunnel packet */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner pseudo checksum when tso */
	l4_paylen = skb->len - l4_offset;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		hdr_len = sizeof(*l4.udp) + l4_offset;
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(l4_paylen));
	} else {
		hdr_len = (l4.tcp->doff << 2) + l4_offset;
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(l4_paylen));
	}

	*send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len;

	/* find the txbd field values */
	*paylen_fdop_ol4cs = skb->len - hdr_len;
	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);

	/* offload outer UDP header checksum */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
		hns3_set_field(*paylen_fdop_ol4cs, HNS3_TXD_OL4CS_B, 1);

	/* get MSS for TSO */
	*mss = skb_shinfo(skb)->gso_size;

	trace_hns3_tso(skb);

	return 0;
}

static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
				u8 *il4_proto)
{
	union l3_hdr_info l3;
	unsigned char *l4_hdr;
	unsigned char *exthdr;
	u8 l4_proto_tmp;
	__be16 frag_off;

	/* find outer header point */
	l3.hdr = skb_network_header(skb);
	l4_hdr = skb_transport_header(skb);

	if (skb->protocol == htons(ETH_P_IPV6)) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (skb->protocol == htons(ETH_P_IP)) {
		l4_proto_tmp = l3.v4->protocol;
	} else {
		return -EINVAL;
	}

	*ol4_proto = l4_proto_tmp;

	/* tunnel packet */
	if (!skb->encapsulation) {
		*il4_proto = 0;
		return 0;
	}

	/* find inner header point */
	l3.hdr = skb_inner_network_header(skb);
	l4_hdr = skb_inner_transport_header(skb);

	if (l3.v6->version == 6) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (l3.v4->version == 4) {
		l4_proto_tmp = l3.v4->protocol;
	} else {
		return 0;
	}

	*il4_proto = l4_proto_tmp;

	return 0;
}

/* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
 * and it is a udp packet with a dest port that the IANA assigned,
 * the hardware is expected to do the checksum offload, but the
 * hardware will not do the checksum offload when the udp dest port is
 * 4789, 4790 or 6081.
 */
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
	struct hns3_nic_priv *priv = netdev_priv(skb->dev);
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
	union l4_hdr_info l4;

	/* For device version V3 or above, the hardware can do this
	 * checksum offload.
	 */
	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		return false;

	l4.hdr = skb_transport_header(skb);

	if (!(!skb->encapsulation &&
	      (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
	       l4.udp->dest == htons(GENEVE_UDP_PORT) ||
	       l4.udp->dest == htons(IANA_VXLAN_GPE_UDP_PORT))))
		return false;

	return true;
}

static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
				  u32 *ol_type_vlan_len_msec)
{
	u32 l2_len, l3_len, l4_len;
	unsigned char *il2_hdr;
	union l3_hdr_info l3;
	union l4_hdr_info l4;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute OL2 header size, defined in 2 Bytes */
	l2_len = l3.hdr - skb->data;
	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1);

	/* compute OL3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);

	il2_hdr = skb_inner_mac_header(skb);
	/* compute OL4 header size, defined in 4 Bytes */
	l4_len = il2_hdr - l4.hdr;
	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);

	/* define outer network header type */
	if (skb->protocol == htons(ETH_P_IP)) {
		if (skb_is_gso(skb))
			hns3_set_field(*ol_type_vlan_len_msec,
				       HNS3_TXD_OL3T_S,
				       HNS3_OL3T_IPV4_CSUM);
		else
			hns3_set_field(*ol_type_vlan_len_msec,
				       HNS3_TXD_OL3T_S,
				       HNS3_OL3T_IPV4_NO_CSUM);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
			       HNS3_OL3T_IPV6);
	}

	if (ol4_proto == IPPROTO_UDP)
		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
			       HNS3_TUN_MAC_IN_UDP);
	else if (ol4_proto == IPPROTO_GRE)
		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
			       HNS3_TUN_NVGRE);
}

static void hns3_set_l3_type(struct sk_buff *skb, union l3_hdr_info l3,
			     u32 *type_cs_vlan_tso)
{
	if (l3.v4->version == 4) {
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
			       HNS3_L3T_IPV4);

		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (skb_is_gso(skb))
			hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
	} else if (l3.v6->version == 6) {
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
			       HNS3_L3T_IPV6);
	}
}

static int hns3_set_l4_csum_length(struct sk_buff *skb, union l4_hdr_info l4,
				   u32 l4_proto, u32 *type_cs_vlan_tso)
{
	/* compute inner(/normal) L4 header size, defined in 4 Bytes */
	switch (l4_proto) {
	case IPPROTO_TCP:
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
			       HNS3_L4T_TCP);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
			       l4.tcp->doff);
		break;
	case IPPROTO_UDP:
		if (hns3_tunnel_csum_bug(skb)) {
			int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN);

			return ret ? ret : skb_checksum_help(skb);
		}

		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
			       HNS3_L4T_UDP);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
			       (sizeof(struct udphdr) >> 2));
		break;
	case IPPROTO_SCTP:
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
			       HNS3_L4T_SCTP);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
			       (sizeof(struct sctphdr) >> 2));
		break;
	default:
		/* drop the skb tunnel packet if hardware doesn't support it,
		 * because hardware can't calculate csum when TSO.
		 */
		if (skb_is_gso(skb))
			return -EDOM;

		/* the stack computes the IP header already,
		 * the driver calculates the l4 checksum when not TSO.
		 */
		return skb_checksum_help(skb);
	}

	return 0;
}

static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
			   u8 il4_proto, u32 *type_cs_vlan_tso,
			   u32 *ol_type_vlan_len_msec)
{
	unsigned char *l2_hdr = skb->data;
	u32 l4_proto = ol4_proto;
	union l4_hdr_info l4;
	union l3_hdr_info l3;
	u32 l2_len, l3_len;

	l4.hdr = skb_transport_header(skb);
	l3.hdr = skb_network_header(skb);

	/* handle encapsulation skb */
	if (skb->encapsulation) {
		/* If this is not a UDP/GRE encapsulation skb */
		if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
			/* drop the skb tunnel packet if hardware doesn't
			 * support it, because hardware can't calculate csum
			 * when TSO.
			 */
			if (skb_is_gso(skb))
				return -EDOM;

			/* the stack computes the IP header already,
			 * the driver calculates the l4 checksum when not TSO.
			 */
			return skb_checksum_help(skb);
		}

		hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);

		/* switch to inner header */
		l2_hdr = skb_inner_mac_header(skb);
		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = il4_proto;
	}

	hns3_set_l3_type(skb, l3, type_cs_vlan_tso);

	/* compute inner(/normal) L2 header size, defined in 2 Bytes */
	l2_len = l3.hdr - l2_hdr;
	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);

	/* compute inner(/normal) L3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);

	return hns3_set_l4_csum_length(skb, l4, l4_proto, type_cs_vlan_tso);
}

static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
			     struct sk_buff *skb)
{
	struct hnae3_handle *handle = tx_ring->tqp->handle;
	struct hnae3_ae_dev *ae_dev;
	struct vlan_ethhdr *vhdr;
	int rc;

	if (!(skb->protocol == htons(ETH_P_8021Q) ||
	      skb_vlan_tag_present(skb)))
		return 0;

	/* For HW limitation on HNAE3_DEVICE_VERSION_V2, if port based insert
	 * VLAN enabled, only one VLAN header is allowed in skb, otherwise it
	 * will cause RAS error.
	 */
	ae_dev = pci_get_drvdata(handle->pdev);
	if (unlikely(skb_vlan_tagged_multi(skb) &&
		     ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
		     handle->port_base_vlan_state ==
		     HNAE3_PORT_BASE_VLAN_ENABLE))
		return -EINVAL;

	if (skb->protocol == htons(ETH_P_8021Q) &&
	    !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off, and the stack
		 * sets the protocol to 802.1q, the driver just needs to
		 * set the protocol to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		return 0;
	}

	if (skb_vlan_tag_present(skb)) {
		/* Based on hw strategy, use out_vtag in two layer tag case,
		 * and use inner_vtag in one tag case.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) &&
		    handle->port_base_vlan_state ==
		    HNAE3_PORT_BASE_VLAN_DISABLE)
			rc = HNS3_OUTER_VLAN_TAG;
		else
			rc = HNS3_INNER_VLAN_TAG;

		skb->protocol = vlan_get_protocol(skb);
		return rc;
	}

	rc = skb_cow_head(skb, 0);
	if (unlikely(rc < 0))
		return rc;

	vhdr = skb_vlan_eth_hdr(skb);
	vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT)
					& VLAN_PRIO_MASK);

	skb->protocol = vlan_get_protocol(skb);
	return 0;
}

/* check if the hardware is capable of checksum offloading */
static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
{
	struct hns3_nic_priv *priv = netdev_priv(skb->dev);

	/* Kindly note, due to backward compatibility of the TX descriptor,
	 * HW checksum of the non-IP packets and GSO packets is handled at a
	 * different place in the following code.
	 */
	if (skb_csum_is_sctp(skb) || skb_is_gso(skb) ||
	    !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state))
		return false;

	return true;
}

struct hns3_desc_param {
	u32 paylen_ol4cs;
	u32 ol_type_vlan_len_msec;
	u32 type_cs_vlan_tso;
	u16 mss_hw_csum;
	u16 inner_vtag;
	u16 out_vtag;
};

static void hns3_init_desc_data(struct sk_buff *skb, struct hns3_desc_param *pa)
{
	pa->paylen_ol4cs = skb->len;
	pa->ol_type_vlan_len_msec = 0;
	pa->type_cs_vlan_tso = 0;
	pa->mss_hw_csum = 0;
	pa->inner_vtag = 0;
	pa->out_vtag = 0;
}

static int hns3_handle_vlan_info(struct hns3_enet_ring *ring,
				 struct sk_buff *skb,
				 struct hns3_desc_param *param)
{
	int ret;

	ret = hns3_handle_vtags(ring, skb);
	if (unlikely(ret < 0)) {
		hns3_ring_stats_update(ring, tx_vlan_err);
		return ret;
	} else if (ret == HNS3_INNER_VLAN_TAG) {
		param->inner_vtag = skb_vlan_tag_get(skb);
		param->inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
				VLAN_PRIO_MASK;
		hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
	} else if (ret == HNS3_OUTER_VLAN_TAG) {
		param->out_vtag = skb_vlan_tag_get(skb);
		param->out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
				VLAN_PRIO_MASK;
		hns3_set_field(param->ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
			       1);
	}
	return 0;
}

static int hns3_handle_csum_partial(struct hns3_enet_ring *ring,
				    struct sk_buff *skb,
				    struct hns3_desc_cb *desc_cb,
				    struct hns3_desc_param *param)
{
	u8 ol4_proto, il4_proto;
	int ret;

	if (hns3_check_hw_tx_csum(skb)) {
		/* set checksum start and offset, defined in 2 Bytes */
		hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_CSUM_START_S,
			       skb_checksum_start_offset(skb) >> 1);
		hns3_set_field(param->ol_type_vlan_len_msec,
			       HNS3_TXD_CSUM_OFFSET_S,
			       skb->csum_offset >> 1);
		param->mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B);
		return 0;
	}

	skb_reset_mac_len(skb);

	ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
	if (unlikely(ret < 0)) {
		hns3_ring_stats_update(ring, tx_l4_proto_err);
		return ret;
	}

	ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
			      &param->type_cs_vlan_tso,
			      &param->ol_type_vlan_len_msec);
	if (unlikely(ret < 0)) {
		hns3_ring_stats_update(ring, tx_l2l3l4_err);
		return ret;
	}

	ret = hns3_set_tso(skb, &param->paylen_ol4cs, &param->mss_hw_csum,
			   &param->type_cs_vlan_tso, &desc_cb->send_bytes);
	if (unlikely(ret < 0)) {
		hns3_ring_stats_update(ring, tx_tso_err);
		return ret;
	}
	return 0;
}

static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
			      struct sk_buff *skb, struct hns3_desc *desc,
			      struct hns3_desc_cb *desc_cb)
{
	struct hns3_desc_param param;
	int ret;

	hns3_init_desc_data(skb, &param);
	ret = hns3_handle_vlan_info(ring, skb, &param);
	if (unlikely(ret < 0))
		return ret;

	desc_cb->send_bytes = skb->len;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ret = hns3_handle_csum_partial(ring, skb, desc_cb, &param);
		if (ret)
			return ret;
	}

	desc->tx.ol_type_vlan_len_msec =
		cpu_to_le32(param.ol_type_vlan_len_msec);
	desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso);
	desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs);
	desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum);
	desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag);
	desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag);

	return 0;
}

static int hns3_fill_desc(struct hns3_enet_ring *ring, dma_addr_t dma,
			  unsigned int size)
{
#define HNS3_LIKELY_BD_NUM	1

	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
	unsigned int frag_buf_num;
	int k, sizeoflast;

	if (likely(size <= HNS3_MAX_BD_SIZE)) {
		desc->addr = cpu_to_le64(dma);
		desc->tx.send_size = cpu_to_le16(size);
		desc->tx.bdtp_fe_sc_vld_ra_ri =
			cpu_to_le16(BIT(HNS3_TXD_VLD_B));

		trace_hns3_tx_desc(ring, ring->next_to_use);
		ring_ptr_move_fw(ring, next_to_use);
		return HNS3_LIKELY_BD_NUM;
	}

	frag_buf_num = hns3_tx_bd_count(size);
	sizeoflast = size % HNS3_MAX_BD_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;

	/* When frag size is bigger than hardware limit, split this frag */
	for (k = 0; k < frag_buf_num; k++) {
		/* now, fill the descriptor */
		desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
		desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
				     (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
		desc->tx.bdtp_fe_sc_vld_ra_ri =
				cpu_to_le16(BIT(HNS3_TXD_VLD_B));

		trace_hns3_tx_desc(ring, ring->next_to_use);
		/* move ring pointer to next */
		ring_ptr_move_fw(ring, next_to_use);

		desc = &ring->desc[ring->next_to_use];
	}

	return frag_buf_num;
}
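
/* Worked example (illustrative numbers): if HNS3_MAX_BD_SIZE is 65535 and a
 * buffer is 100000 bytes, hns3_tx_bd_count() yields 2 BDs; the first BD
 * carries 65535 bytes and the last one carries sizeoflast =
 * 100000 % 65535 = 34465 bytes.
 */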

static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
				  unsigned int type)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct device *dev = ring_to_dev(ring);
	unsigned int size;
	dma_addr_t dma;

	if (type & (DESC_TYPE_FRAGLIST_SKB | DESC_TYPE_SKB)) {
		struct sk_buff *skb = (struct sk_buff *)priv;

		size = skb_headlen(skb);
		if (!size)
			return 0;

		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	} else if (type & DESC_TYPE_BOUNCE_HEAD) {
		/* Head data has been filled in hns3_handle_tx_bounce(),
		 * just return 0 here.
		 */
		return 0;
	} else {
		skb_frag_t *frag = (skb_frag_t *)priv;

		size = skb_frag_size(frag);
		if (!size)
			return 0;

		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
	}

	if (unlikely(dma_mapping_error(dev, dma))) {
		hns3_ring_stats_update(ring, sw_err_cnt);
		return -ENOMEM;
	}

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	return hns3_fill_desc(ring, dma, size);
}

static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
				    unsigned int bd_num)
{
	unsigned int size;
	int i;

	size = skb_headlen(skb);
	while (size > HNS3_MAX_BD_SIZE) {
		bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
		size -= HNS3_MAX_BD_SIZE;

		if (bd_num > HNS3_MAX_TSO_BD_NUM)
			return bd_num;
	}

	bd_size[bd_num++] = size;
	if (bd_num > HNS3_MAX_TSO_BD_NUM)
		return bd_num;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		size = skb_frag_size(frag);
		if (!size)
			continue;

		while (size > HNS3_MAX_BD_SIZE) {
			bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
			size -= HNS3_MAX_BD_SIZE;

			if (bd_num > HNS3_MAX_TSO_BD_NUM)
				return bd_num;
		}

		bd_size[bd_num++] = size;
		if (bd_num > HNS3_MAX_TSO_BD_NUM)
			return bd_num;
	}

	return bd_num;
}

static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
				   u8 max_non_tso_bd_num, unsigned int bd_num,
				   unsigned int recursion_level)
{
#define HNS3_MAX_RECURSION_LEVEL	24

	struct sk_buff *frag_skb;

	/* If the total len is within the max bd limit */
	if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level &&
		   !skb_has_frag_list(skb) &&
		   skb_shinfo(skb)->nr_frags < max_non_tso_bd_num))
		return skb_shinfo(skb)->nr_frags + 1U;

	if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL))
		return UINT_MAX;

	bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
	if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM)
		return bd_num;

	skb_walk_frags(skb, frag_skb) {
		bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num,
					bd_num, recursion_level + 1);
		if (bd_num > HNS3_MAX_TSO_BD_NUM)
			return bd_num;
	}

	return bd_num;
}

static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
{
	if (!skb->encapsulation)
		return skb_tcp_all_headers(skb);

	return skb_inner_tcp_all_headers(skb);
}

/* HW needs every continuous max_non_tso_bd_num buffer data to be larger
 * than MSS; we simplify it by ensuring skb_headlen + the first continuous
 * max_non_tso_bd_num - 1 frags to be larger than gso header len + mss,
 * and the remaining continuous max_non_tso_bd_num - 1 frags to be larger
 * than MSS except the last max_non_tso_bd_num - 1 frags.
 */
static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
				     unsigned int bd_num, u8 max_non_tso_bd_num)
{
	unsigned int tot_len = 0;
	int i;

	for (i = 0; i < max_non_tso_bd_num - 1U; i++)
		tot_len += bd_size[i];

	/* ensure the first max_non_tso_bd_num frags is greater than
	 * mss + header
	 */
	if (tot_len + bd_size[max_non_tso_bd_num - 1U] <
	    skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb))
		return true;

	/* ensure every continuous max_non_tso_bd_num - 1 buffer is greater
	 * than mss except the last one.
	 */
	for (i = 0; i < bd_num - max_non_tso_bd_num; i++) {
		tot_len -= bd_size[i];
		tot_len += bd_size[i + max_non_tso_bd_num - 1U];

		if (tot_len < skb_shinfo(skb)->gso_size)
			return true;
	}

	return false;
}

void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
{
	int i;

	for (i = 0; i < MAX_SKB_FRAGS; i++)
		size[i] = skb_frag_size(&shinfo->frags[i]);
}

static int hns3_skb_linearize(struct hns3_enet_ring *ring,
			      struct sk_buff *skb,
			      unsigned int bd_num)
{
	/* 'bd_num == UINT_MAX' means the skb's fraglist has a
	 * recursion level of over HNS3_MAX_RECURSION_LEVEL.
	 */
	if (bd_num == UINT_MAX) {
		hns3_ring_stats_update(ring, over_max_recursion);
		return -ENOMEM;
	}

	/* The skb->len has exceeded the hw limitation, linearization
	 * will not help.
	 */
	if (skb->len > HNS3_MAX_TSO_SIZE ||
	    (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
		hns3_ring_stats_update(ring, hw_limitation);
		return -ENOMEM;
	}

	if (__skb_linearize(skb)) {
		hns3_ring_stats_update(ring, sw_err_cnt);
		return -ENOMEM;
	}

	return 0;
}

static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
				  struct net_device *netdev,
				  struct sk_buff *skb)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	u8 max_non_tso_bd_num = priv->max_non_tso_bd_num;
	unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
	unsigned int bd_num;

	bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0);
	if (unlikely(bd_num > max_non_tso_bd_num)) {
		if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
		    !hns3_skb_need_linearized(skb, bd_size, bd_num,
					      max_non_tso_bd_num)) {
			trace_hns3_over_max_bd(skb);
			goto out;
		}

		if (hns3_skb_linearize(ring, skb, bd_num))
			return -ENOMEM;

		bd_num = hns3_tx_bd_count(skb->len);

		hns3_ring_stats_update(ring, tx_copy);
	}

out:
	if (likely(ring_space(ring) >= bd_num))
		return bd_num;

	netif_stop_subqueue(netdev, ring->queue_index);
	smp_mb(); /* Memory barrier before checking ring_space */

	/* Start queue in case hns3_clean_tx_ring has just made room
	 * available and has not seen the queue stopped state performed
	 * by netif_stop_subqueue above.
	 */
	if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) &&
	    !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
		netif_start_subqueue(netdev, ring->queue_index);
		return bd_num;
	}

	hns3_ring_stats_update(ring, tx_busy);

	return -EBUSY;
}

static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
{
	struct device *dev = ring_to_dev(ring);
	unsigned int i;

	for (i = 0; i < ring->desc_num; i++) {
		struct hns3_desc *desc = &ring->desc[ring->next_to_use];
		struct hns3_desc_cb *desc_cb;

		memset(desc, 0, sizeof(*desc));

		/* check if this is where we started */
		if (ring->next_to_use == next_to_use_orig)
			break;

		/* rollback one */
		ring_ptr_move_bw(ring, next_to_use);

		desc_cb = &ring->desc_cb[ring->next_to_use];

		/* unmap the descriptor dma address */
		if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB))
			dma_unmap_single(dev, desc_cb->dma, desc_cb->length,
					 DMA_TO_DEVICE);
		else if (desc_cb->type &
			 (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL))
			hns3_tx_spare_rollback(ring, desc_cb->length);
		else if (desc_cb->length)
			dma_unmap_page(dev, desc_cb->dma, desc_cb->length,
				       DMA_TO_DEVICE);

		desc_cb->length = 0;
		desc_cb->type = DESC_TYPE_UNKNOWN;
	}
}
static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
				 struct sk_buff *skb, unsigned int type)
{
	struct sk_buff *frag_skb;
	int i, ret, bd_num = 0;

	ret = hns3_map_and_fill_desc(ring, skb, type);
	if (unlikely(ret < 0))
		return ret;

	bd_num += ret;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ret = hns3_map_and_fill_desc(ring, frag, DESC_TYPE_PAGE);
		if (unlikely(ret < 0))
			return ret;

		bd_num += ret;
	}

	skb_walk_frags(skb, frag_skb) {
		ret = hns3_fill_skb_to_desc(ring, frag_skb,
					    DESC_TYPE_FRAGLIST_SKB);
		if (unlikely(ret < 0))
			return ret;

		bd_num += ret;
	}

	return bd_num;
}
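/* TX push: copy the just-filled BDs into the queue's memory-mapped window
 * (tqp->mem_base), presumably letting the device fetch small packets
 * without a tail doorbell. __iowrite64_copy() writes in 64-bit units,
 * hence the HNS3_BYTES_PER_64BIT divisor below.
 */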
static void hns3_tx_push_bd(struct hns3_enet_ring *ring, int num)
{
#define HNS3_BYTES_PER_64BIT		8

	struct hns3_desc desc[HNS3_MAX_PUSH_BD_NUM] = {};
	int offset = 0;

	/* make sure everything is visible to device before
	 * executing tx push or updating doorbell
	 */
	dma_wmb();

	do {
		int idx = (ring->next_to_use - num + ring->desc_num) %
			  ring->desc_num;

		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_push++;
		u64_stats_update_end(&ring->syncp);
		memcpy(&desc[offset], &ring->desc[idx],
		       sizeof(struct hns3_desc));
		offset++;
	} while (--num);

	__iowrite64_copy(ring->tqp->mem_base, desc,
			 (sizeof(struct hns3_desc) * HNS3_MAX_PUSH_BD_NUM) /
			 HNS3_BYTES_PER_64BIT);
}
static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring)
{
#define HNS3_MEM_DOORBELL_OFFSET	64

	__le64 bd_num = cpu_to_le64((u64)ring->pending_buf);

	/* make sure everything is visible to device before
	 * executing tx push or updating doorbell
	 */
	dma_wmb();

	__iowrite64_copy(ring->tqp->mem_base + HNS3_MEM_DOORBELL_OFFSET,
			 &bd_num, 1);
	u64_stats_update_begin(&ring->syncp);
	ring->stats.tx_mem_doorbell += ring->pending_buf;
	u64_stats_update_end(&ring->syncp);
}
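/* Notify hardware of newly filled TX BDs. Small packets can be pushed
 * directly via hns3_tx_push_bd() when TX push is enabled; otherwise
 * pending BDs are accumulated and flushed either through the
 * memory-mapped doorbell (when tqp->mem_base exists) or by writing the
 * TAIL register.
 */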
static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
			     bool doorbell)
{
	struct net_device *netdev = ring_to_netdev(ring);
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	/* When tx push is enabled, a packet whose BD count is below
	 * HNS3_MAX_PUSH_BD_NUM can be pushed directly.
	 */
	if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
	    !ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
		hns3_tx_push_bd(ring, num);
		WRITE_ONCE(ring->last_to_use, ring->next_to_use);
		return;
	}

	ring->pending_buf += num;

	if (!doorbell) {
		hns3_ring_stats_update(ring, tx_more);
		return;
	}

	if (ring->tqp->mem_base)
		hns3_tx_mem_doorbell(ring);
	else
		writel(ring->pending_buf,
		       ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);

	ring->pending_buf = 0;
	WRITE_ONCE(ring->last_to_use, ring->next_to_use);
}
static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb,
		      struct hns3_desc *desc)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!(h->ae_algo->ops->set_tx_hwts_info &&
	      h->ae_algo->ops->set_tx_hwts_info(h, skb)))
		return;

	desc->tx.bdtp_fe_sc_vld_ra_ri |= cpu_to_le16(BIT(HNS3_TXD_TSYN_B));
}
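/* Bounce-buffer TX path: copy the skb head (or, below tx_copybreak, the
 * whole skb) into the preallocated tx spare buffer so the hardware sees
 * one contiguous, already-mapped region. Returns the number of BDs used.
 */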
static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
				 struct sk_buff *skb)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	unsigned int type = DESC_TYPE_BOUNCE_HEAD;
	unsigned int size = skb_headlen(skb);
	dma_addr_t dma;
	int bd_num = 0;
	u32 cb_len;
	void *buf;
	int ret;

	if (skb->len <= ring->tx_copybreak) {
		size = skb->len;
		type = DESC_TYPE_BOUNCE_ALL;
	}

	/* hns3_can_use_tx_bounce() is called beforehand to ensure the
	 * allocation below always returns a tx buffer.
	 */
	buf = hns3_tx_spare_alloc(ring, size, &dma, &cb_len);

	ret = skb_copy_bits(skb, 0, buf, size);
	if (unlikely(ret < 0)) {
		hns3_tx_spare_rollback(ring, cb_len);
		hns3_ring_stats_update(ring, copy_bits_err);
		return ret;
	}

	desc_cb->priv = skb;
	desc_cb->length = cb_len;
	desc_cb->dma = dma;
	desc_cb->type = type;

	bd_num += hns3_fill_desc(ring, dma, size);

	if (type == DESC_TYPE_BOUNCE_HEAD) {
		ret = hns3_fill_skb_to_desc(ring, skb,
					    DESC_TYPE_BOUNCE_HEAD);
		if (unlikely(ret < 0))
			return ret;

		bd_num += ret;
	}

	dma_sync_single_for_device(ring_to_dev(ring), dma, size,
				   DMA_TO_DEVICE);

	hns3_ring_stats_update(ring, tx_bounce);

	return bd_num;
}
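/* Scatter-gather TX path: build a scatterlist in the tx spare buffer and
 * map all fragments with a single dma_map_sg() call, which can reduce
 * per-fragment IOMMU mapping overhead for heavily fragmented skbs.
 */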
static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
			      struct sk_buff *skb)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	u32 nfrag = skb_shinfo(skb)->nr_frags + 1;
	struct sg_table *sgt;
	int i, bd_num = 0;
	dma_addr_t dma;
	u32 cb_len;
	int nents;

	if (skb_has_frag_list(skb))
		nfrag = HNS3_MAX_TSO_BD_NUM;

	/* hns3_can_use_tx_sgl() is called beforehand to ensure the
	 * allocation below always returns a tx buffer.
	 */
	sgt = hns3_tx_spare_alloc(ring, HNS3_SGL_SIZE(nfrag),
				  &dma, &cb_len);

	/* the scatterlist follows the sg table */
	sgt->sgl = (struct scatterlist *)(sgt + 1);
	sg_init_table(sgt->sgl, nfrag);
	nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
	if (unlikely(nents < 0)) {
		hns3_tx_spare_rollback(ring, cb_len);
		hns3_ring_stats_update(ring, skb2sgl_err);
		return -ENOMEM;
	}

	sgt->orig_nents = nents;
	sgt->nents = dma_map_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents,
				DMA_TO_DEVICE);
	if (unlikely(!sgt->nents)) {
		hns3_tx_spare_rollback(ring, cb_len);
		hns3_ring_stats_update(ring, map_sg_err);
		return -ENOMEM;
	}

	desc_cb->priv = skb;
	desc_cb->length = cb_len;
	desc_cb->dma = dma;
	desc_cb->type = DESC_TYPE_SGL_SKB;

	for (i = 0; i < sgt->nents; i++)
		bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i),
					 sg_dma_len(sgt->sgl + i));
	hns3_ring_stats_update(ring, tx_sgl);

	return bd_num;
}
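/* Pick a TX filling strategy for this skb: the sgl path, the bounce path,
 * or plain per-fragment mapping when no tx spare buffer is configured.
 */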
static int hns3_handle_desc_filling(struct hns3_enet_ring *ring,
				    struct sk_buff *skb)
{
	u32 space;

	if (!ring->tx_spare)
		goto out;

	space = hns3_tx_spare_space(ring);

	if (hns3_can_use_tx_sgl(ring, skb, space))
		return hns3_handle_tx_sgl(ring, skb);

	if (hns3_can_use_tx_bounce(ring, skb, space))
		return hns3_handle_tx_bounce(ring, skb);

out:
	return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
}
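/* Fill the per-packet descriptor fields and then the data BDs; on any
 * failure roll the ring back to next_to_use_head via hns3_clear_desc().
 */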
static int hns3_handle_skb_desc(struct hns3_enet_ring *ring,
				struct sk_buff *skb,
				struct hns3_desc_cb *desc_cb,
				int next_to_use_head)
{
	int ret;

	ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use],
				 desc_cb);
	if (unlikely(ret < 0))
		goto fill_err;

	/* 'ret < 0' means filling error, 'ret == 0' means skb->len is
	 * zero, which is unlikely, and 'ret > 0' means how many tx desc
	 * need to be notified to the hw.
	 */
	ret = hns3_handle_desc_filling(ring, skb);
	if (likely(ret > 0))
		return ret;

fill_err:
	hns3_clear_desc(ring, next_to_use_head);

	return ret;
}
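/* .ndo_start_xmit entry: pad undersized frames, reserve ring space, fill
 * the BDs, set the frame-end bit on the last BD and ring the doorbell,
 * honouring netdev_xmit_more() batching.
 */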
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct netdev_queue *dev_queue;
	int pre_ntu, ret;
	bool doorbell;

	/* Hardware can only handle short frames above 32 bytes */
	if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
		hns3_tx_doorbell(ring, 0, !netdev_xmit_more());

		hns3_ring_stats_update(ring, sw_err_cnt);

		return NETDEV_TX_OK;
	}

	/* Prefetch the data used later */
	prefetch(skb->data);

	ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
	if (unlikely(ret <= 0)) {
		if (ret == -EBUSY) {
			hns3_tx_doorbell(ring, 0, true);
			return NETDEV_TX_BUSY;
		}

		hns3_rl_err(netdev, "xmit error: %d!\n", ret);
		goto out_err_tx_ok;
	}

	ret = hns3_handle_skb_desc(ring, skb, desc_cb, ring->next_to_use);
	if (unlikely(ret <= 0))
		goto out_err_tx_ok;

	pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
					(ring->desc_num - 1);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		hns3_tsyn(netdev, skb, &ring->desc[pre_ntu]);

	ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
				cpu_to_le16(BIT(HNS3_TXD_FE_B));
	trace_hns3_tx_desc(ring, pre_ntu);

	skb_tx_timestamp(skb);

	/* This packet is fully translated; notify the stack and hardware */
	dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
	doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes,
					  netdev_xmit_more());
	hns3_tx_doorbell(ring, ret, doorbell);

	return NETDEV_TX_OK;

out_err_tx_ok:
	dev_kfree_skb_any(skb);
	hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
	return NETDEV_TX_OK;
}
2351 static int hns3_nic_net_set_mac_address(struct net_device
*netdev
, void *p
)
2353 char format_mac_addr_perm
[HNAE3_FORMAT_MAC_ADDR_LEN
];
2354 char format_mac_addr_sa
[HNAE3_FORMAT_MAC_ADDR_LEN
];
2355 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
2356 struct sockaddr
*mac_addr
= p
;
2359 if (!mac_addr
|| !is_valid_ether_addr((const u8
*)mac_addr
->sa_data
))
2360 return -EADDRNOTAVAIL
;
2362 if (ether_addr_equal(netdev
->dev_addr
, mac_addr
->sa_data
)) {
2363 hnae3_format_mac_addr(format_mac_addr_sa
, mac_addr
->sa_data
);
2364 netdev_info(netdev
, "already using mac address %s\n",
2365 format_mac_addr_sa
);
2369 /* For VF device, if there is a perm_addr, then the user will not
2370 * be allowed to change the address.
2372 if (!hns3_is_phys_func(h
->pdev
) &&
2373 !is_zero_ether_addr(netdev
->perm_addr
)) {
2374 hnae3_format_mac_addr(format_mac_addr_perm
, netdev
->perm_addr
);
2375 hnae3_format_mac_addr(format_mac_addr_sa
, mac_addr
->sa_data
);
2376 netdev_err(netdev
, "has permanent MAC %s, user MAC %s not allow\n",
2377 format_mac_addr_perm
, format_mac_addr_sa
);
2381 ret
= h
->ae_algo
->ops
->set_mac_addr(h
, mac_addr
->sa_data
, false);
2383 netdev_err(netdev
, "set_mac_address fail, ret=%d!\n", ret
);
2387 eth_hw_addr_set(netdev
, mac_addr
->sa_data
);
2392 static int hns3_nic_do_ioctl(struct net_device
*netdev
,
2393 struct ifreq
*ifr
, int cmd
)
2395 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
2397 if (!netif_running(netdev
))
2400 if (!h
->ae_algo
->ops
->do_ioctl
)
2403 return h
->ae_algo
->ops
->do_ioctl(h
, ifr
, cmd
);
2406 static int hns3_nic_set_features(struct net_device
*netdev
,
2407 netdev_features_t features
)
2409 netdev_features_t changed
= netdev
->features
^ features
;
2410 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
2411 struct hnae3_handle
*h
= priv
->ae_handle
;
2415 if (changed
& (NETIF_F_GRO_HW
) && h
->ae_algo
->ops
->set_gro_en
) {
2416 enable
= !!(features
& NETIF_F_GRO_HW
);
2417 ret
= h
->ae_algo
->ops
->set_gro_en(h
, enable
);
2422 if ((changed
& NETIF_F_HW_VLAN_CTAG_RX
) &&
2423 h
->ae_algo
->ops
->enable_hw_strip_rxvtag
) {
2424 enable
= !!(features
& NETIF_F_HW_VLAN_CTAG_RX
);
2425 ret
= h
->ae_algo
->ops
->enable_hw_strip_rxvtag(h
, enable
);
2430 if ((changed
& NETIF_F_NTUPLE
) && h
->ae_algo
->ops
->enable_fd
) {
2431 enable
= !!(features
& NETIF_F_NTUPLE
);
2432 h
->ae_algo
->ops
->enable_fd(h
, enable
);
2435 if ((netdev
->features
& NETIF_F_HW_TC
) > (features
& NETIF_F_HW_TC
) &&
2436 h
->ae_algo
->ops
->cls_flower_active(h
)) {
2438 "there are offloaded TC filters active, cannot disable HW TC offload");
2442 if ((changed
& NETIF_F_HW_VLAN_CTAG_FILTER
) &&
2443 h
->ae_algo
->ops
->enable_vlan_filter
) {
2444 enable
= !!(features
& NETIF_F_HW_VLAN_CTAG_FILTER
);
2445 ret
= h
->ae_algo
->ops
->enable_vlan_filter(h
, enable
);
2450 netdev
->features
= features
;
2454 static netdev_features_t
hns3_features_check(struct sk_buff
*skb
,
2455 struct net_device
*dev
,
2456 netdev_features_t features
)
2458 #define HNS3_MAX_HDR_LEN 480U
2459 #define HNS3_MAX_L4_HDR_LEN 60U
2463 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
2466 if (skb
->encapsulation
)
2467 len
= skb_inner_transport_header(skb
) - skb
->data
;
2469 len
= skb_transport_header(skb
) - skb
->data
;
2471 /* Assume L4 is 60 byte as TCP is the only protocol with a
2472 * a flexible value, and it's max len is 60 bytes.
2474 len
+= HNS3_MAX_L4_HDR_LEN
;
2476 /* Hardware only supports checksum on the skb with a max header
2479 if (len
> HNS3_MAX_HDR_LEN
)
2480 features
&= ~(NETIF_F_CSUM_MASK
| NETIF_F_GSO_MASK
);
2485 static void hns3_fetch_stats(struct rtnl_link_stats64
*stats
,
2486 struct hns3_enet_ring
*ring
, bool is_tx
)
2491 start
= u64_stats_fetch_begin(&ring
->syncp
);
2493 stats
->tx_bytes
+= ring
->stats
.tx_bytes
;
2494 stats
->tx_packets
+= ring
->stats
.tx_pkts
;
2495 stats
->tx_dropped
+= ring
->stats
.sw_err_cnt
;
2496 stats
->tx_dropped
+= ring
->stats
.tx_vlan_err
;
2497 stats
->tx_dropped
+= ring
->stats
.tx_l4_proto_err
;
2498 stats
->tx_dropped
+= ring
->stats
.tx_l2l3l4_err
;
2499 stats
->tx_dropped
+= ring
->stats
.tx_tso_err
;
2500 stats
->tx_dropped
+= ring
->stats
.over_max_recursion
;
2501 stats
->tx_dropped
+= ring
->stats
.hw_limitation
;
2502 stats
->tx_dropped
+= ring
->stats
.copy_bits_err
;
2503 stats
->tx_dropped
+= ring
->stats
.skb2sgl_err
;
2504 stats
->tx_dropped
+= ring
->stats
.map_sg_err
;
2505 stats
->tx_errors
+= ring
->stats
.sw_err_cnt
;
2506 stats
->tx_errors
+= ring
->stats
.tx_vlan_err
;
2507 stats
->tx_errors
+= ring
->stats
.tx_l4_proto_err
;
2508 stats
->tx_errors
+= ring
->stats
.tx_l2l3l4_err
;
2509 stats
->tx_errors
+= ring
->stats
.tx_tso_err
;
2510 stats
->tx_errors
+= ring
->stats
.over_max_recursion
;
2511 stats
->tx_errors
+= ring
->stats
.hw_limitation
;
2512 stats
->tx_errors
+= ring
->stats
.copy_bits_err
;
2513 stats
->tx_errors
+= ring
->stats
.skb2sgl_err
;
2514 stats
->tx_errors
+= ring
->stats
.map_sg_err
;
2516 stats
->rx_bytes
+= ring
->stats
.rx_bytes
;
2517 stats
->rx_packets
+= ring
->stats
.rx_pkts
;
2518 stats
->rx_dropped
+= ring
->stats
.l2_err
;
2519 stats
->rx_errors
+= ring
->stats
.l2_err
;
2520 stats
->rx_errors
+= ring
->stats
.l3l4_csum_err
;
2521 stats
->rx_crc_errors
+= ring
->stats
.l2_err
;
2522 stats
->multicast
+= ring
->stats
.rx_multicast
;
2523 stats
->rx_length_errors
+= ring
->stats
.err_pkt_len
;
2525 } while (u64_stats_fetch_retry(&ring
->syncp
, start
));
2528 static void hns3_nic_get_stats64(struct net_device
*netdev
,
2529 struct rtnl_link_stats64
*stats
)
2531 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
2532 int queue_num
= priv
->ae_handle
->kinfo
.num_tqps
;
2533 struct hnae3_handle
*handle
= priv
->ae_handle
;
2534 struct rtnl_link_stats64 ring_total_stats
;
2535 struct hns3_enet_ring
*ring
;
2538 if (test_bit(HNS3_NIC_STATE_DOWN
, &priv
->state
))
2541 handle
->ae_algo
->ops
->update_stats(handle
, &netdev
->stats
);
2543 memset(&ring_total_stats
, 0, sizeof(ring_total_stats
));
2544 for (idx
= 0; idx
< queue_num
; idx
++) {
2545 /* fetch the tx stats */
2546 ring
= &priv
->ring
[idx
];
2547 hns3_fetch_stats(&ring_total_stats
, ring
, true);
2549 /* fetch the rx stats */
2550 ring
= &priv
->ring
[idx
+ queue_num
];
2551 hns3_fetch_stats(&ring_total_stats
, ring
, false);
2554 stats
->tx_bytes
= ring_total_stats
.tx_bytes
;
2555 stats
->tx_packets
= ring_total_stats
.tx_packets
;
2556 stats
->rx_bytes
= ring_total_stats
.rx_bytes
;
2557 stats
->rx_packets
= ring_total_stats
.rx_packets
;
2559 stats
->rx_errors
= ring_total_stats
.rx_errors
;
2560 stats
->multicast
= ring_total_stats
.multicast
;
2561 stats
->rx_length_errors
= ring_total_stats
.rx_length_errors
;
2562 stats
->rx_crc_errors
= ring_total_stats
.rx_crc_errors
;
2563 stats
->rx_missed_errors
= netdev
->stats
.rx_missed_errors
;
2565 stats
->tx_errors
= ring_total_stats
.tx_errors
;
2566 stats
->rx_dropped
= ring_total_stats
.rx_dropped
;
2567 stats
->tx_dropped
= ring_total_stats
.tx_dropped
;
2568 stats
->collisions
= netdev
->stats
.collisions
;
2569 stats
->rx_over_errors
= netdev
->stats
.rx_over_errors
;
2570 stats
->rx_frame_errors
= netdev
->stats
.rx_frame_errors
;
2571 stats
->rx_fifo_errors
= netdev
->stats
.rx_fifo_errors
;
2572 stats
->tx_aborted_errors
= netdev
->stats
.tx_aborted_errors
;
2573 stats
->tx_carrier_errors
= netdev
->stats
.tx_carrier_errors
;
2574 stats
->tx_fifo_errors
= netdev
->stats
.tx_fifo_errors
;
2575 stats
->tx_heartbeat_errors
= netdev
->stats
.tx_heartbeat_errors
;
2576 stats
->tx_window_errors
= netdev
->stats
.tx_window_errors
;
2577 stats
->rx_compressed
= netdev
->stats
.rx_compressed
;
2578 stats
->tx_compressed
= netdev
->stats
.tx_compressed
;
2581 static int hns3_setup_tc(struct net_device
*netdev
, void *type_data
)
2583 struct tc_mqprio_qopt_offload
*mqprio_qopt
= type_data
;
2584 struct hnae3_knic_private_info
*kinfo
;
2585 u8 tc
= mqprio_qopt
->qopt
.num_tc
;
2586 u16 mode
= mqprio_qopt
->mode
;
2587 u8 hw
= mqprio_qopt
->qopt
.hw
;
2588 struct hnae3_handle
*h
;
2590 if (!((hw
== TC_MQPRIO_HW_OFFLOAD_TCS
&&
2591 mode
== TC_MQPRIO_MODE_CHANNEL
) || (!hw
&& tc
== 0)))
2594 if (tc
> HNAE3_MAX_TC
)
2600 h
= hns3_get_handle(netdev
);
2603 netif_dbg(h
, drv
, netdev
, "setup tc: num_tc=%u\n", tc
);
2605 return (kinfo
->dcb_ops
&& kinfo
->dcb_ops
->setup_tc
) ?
2606 kinfo
->dcb_ops
->setup_tc(h
, mqprio_qopt
) : -EOPNOTSUPP
;
2609 static int hns3_setup_tc_cls_flower(struct hns3_nic_priv
*priv
,
2610 struct flow_cls_offload
*flow
)
2612 int tc
= tc_classid_to_hwtc(priv
->netdev
, flow
->classid
);
2613 struct hnae3_handle
*h
= hns3_get_handle(priv
->netdev
);
2615 switch (flow
->command
) {
2616 case FLOW_CLS_REPLACE
:
2617 if (h
->ae_algo
->ops
->add_cls_flower
)
2618 return h
->ae_algo
->ops
->add_cls_flower(h
, flow
, tc
);
2620 case FLOW_CLS_DESTROY
:
2621 if (h
->ae_algo
->ops
->del_cls_flower
)
2622 return h
->ae_algo
->ops
->del_cls_flower(h
, flow
);
2631 static int hns3_setup_tc_block_cb(enum tc_setup_type type
, void *type_data
,
2634 struct hns3_nic_priv
*priv
= cb_priv
;
2636 if (!tc_cls_can_offload_and_chain0(priv
->netdev
, type_data
))
2640 case TC_SETUP_CLSFLOWER
:
2641 return hns3_setup_tc_cls_flower(priv
, type_data
);
2647 static LIST_HEAD(hns3_block_cb_list
);
2649 static int hns3_nic_setup_tc(struct net_device
*dev
, enum tc_setup_type type
,
2652 struct hns3_nic_priv
*priv
= netdev_priv(dev
);
2656 case TC_SETUP_QDISC_MQPRIO
:
2657 ret
= hns3_setup_tc(dev
, type_data
);
2659 case TC_SETUP_BLOCK
:
2660 ret
= flow_block_cb_setup_simple(type_data
,
2661 &hns3_block_cb_list
,
2662 hns3_setup_tc_block_cb
,
2672 static int hns3_vlan_rx_add_vid(struct net_device
*netdev
,
2673 __be16 proto
, u16 vid
)
2675 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
2678 if (h
->ae_algo
->ops
->set_vlan_filter
)
2679 ret
= h
->ae_algo
->ops
->set_vlan_filter(h
, proto
, vid
, false);
2684 static int hns3_vlan_rx_kill_vid(struct net_device
*netdev
,
2685 __be16 proto
, u16 vid
)
2687 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
2690 if (h
->ae_algo
->ops
->set_vlan_filter
)
2691 ret
= h
->ae_algo
->ops
->set_vlan_filter(h
, proto
, vid
, true);
2696 static int hns3_ndo_set_vf_vlan(struct net_device
*netdev
, int vf
, u16 vlan
,
2697 u8 qos
, __be16 vlan_proto
)
2699 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
2702 netif_dbg(h
, drv
, netdev
,
2703 "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n",
2704 vf
, vlan
, qos
, ntohs(vlan_proto
));
2706 if (h
->ae_algo
->ops
->set_vf_vlan_filter
)
2707 ret
= h
->ae_algo
->ops
->set_vf_vlan_filter(h
, vf
, vlan
,
2713 static int hns3_set_vf_spoofchk(struct net_device
*netdev
, int vf
, bool enable
)
2715 struct hnae3_handle
*handle
= hns3_get_handle(netdev
);
2717 if (hns3_nic_resetting(netdev
))
2720 if (!handle
->ae_algo
->ops
->set_vf_spoofchk
)
2723 return handle
->ae_algo
->ops
->set_vf_spoofchk(handle
, vf
, enable
);
2726 static int hns3_set_vf_trust(struct net_device
*netdev
, int vf
, bool enable
)
2728 struct hnae3_handle
*handle
= hns3_get_handle(netdev
);
2730 if (!handle
->ae_algo
->ops
->set_vf_trust
)
2733 return handle
->ae_algo
->ops
->set_vf_trust(handle
, vf
, enable
);
2736 static int hns3_nic_change_mtu(struct net_device
*netdev
, int new_mtu
)
2738 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
2741 if (hns3_nic_resetting(netdev
))
2744 if (!h
->ae_algo
->ops
->set_mtu
)
2747 netif_dbg(h
, drv
, netdev
,
2748 "change mtu from %u to %d\n", netdev
->mtu
, new_mtu
);
2750 ret
= h
->ae_algo
->ops
->set_mtu(h
, new_mtu
);
2752 netdev_err(netdev
, "failed to change MTU in hardware %d\n",
2755 netdev
->mtu
= new_mtu
;
2760 static int hns3_get_timeout_queue(struct net_device
*ndev
)
2764 /* Find the stopped queue the same way the stack does */
2765 for (i
= 0; i
< ndev
->num_tx_queues
; i
++) {
2766 struct netdev_queue
*q
;
2767 unsigned long trans_start
;
2769 q
= netdev_get_tx_queue(ndev
, i
);
2770 trans_start
= READ_ONCE(q
->trans_start
);
2771 if (netif_xmit_stopped(q
) &&
2773 (trans_start
+ ndev
->watchdog_timeo
))) {
2775 struct dql
*dql
= &q
->dql
;
2777 netdev_info(ndev
, "DQL info last_cnt: %u, queued: %u, adj_limit: %u, completed: %u\n",
2778 dql
->last_obj_cnt
, dql
->num_queued
,
2779 dql
->adj_limit
, dql
->num_completed
);
2781 netdev_info(ndev
, "queue state: 0x%lx, delta msecs: %u\n",
2783 jiffies_to_msecs(jiffies
- trans_start
));
2791 static void hns3_dump_queue_stats(struct net_device
*ndev
,
2792 struct hns3_enet_ring
*tx_ring
,
2795 struct napi_struct
*napi
= &tx_ring
->tqp_vector
->napi
;
2796 struct hns3_nic_priv
*priv
= netdev_priv(ndev
);
2799 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
2800 priv
->tx_timeout_count
, timeout_queue
, tx_ring
->next_to_use
,
2801 tx_ring
->next_to_clean
, napi
->state
);
2804 "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n",
2805 tx_ring
->stats
.tx_pkts
, tx_ring
->stats
.tx_bytes
,
2806 tx_ring
->stats
.sw_err_cnt
, tx_ring
->pending_buf
);
2809 "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n",
2810 tx_ring
->stats
.seg_pkt_cnt
, tx_ring
->stats
.tx_more
,
2811 tx_ring
->stats
.restart_queue
, tx_ring
->stats
.tx_busy
);
2813 netdev_info(ndev
, "tx_push: %llu, tx_mem_doorbell: %llu\n",
2814 tx_ring
->stats
.tx_push
, tx_ring
->stats
.tx_mem_doorbell
);
2817 static void hns3_dump_queue_reg(struct net_device
*ndev
,
2818 struct hns3_enet_ring
*tx_ring
)
2821 "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
2822 hns3_tqp_read_reg(tx_ring
, HNS3_RING_TX_RING_BD_NUM_REG
),
2823 hns3_tqp_read_reg(tx_ring
, HNS3_RING_TX_RING_HEAD_REG
),
2824 hns3_tqp_read_reg(tx_ring
, HNS3_RING_TX_RING_TAIL_REG
),
2825 hns3_tqp_read_reg(tx_ring
, HNS3_RING_TX_RING_BD_ERR_REG
),
2826 readl(tx_ring
->tqp_vector
->mask_addr
));
2828 "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
2829 hns3_tqp_read_reg(tx_ring
, HNS3_RING_EN_REG
),
2830 hns3_tqp_read_reg(tx_ring
, HNS3_RING_TX_RING_TC_REG
),
2831 hns3_tqp_read_reg(tx_ring
, HNS3_RING_TX_RING_FBDNUM_REG
),
2832 hns3_tqp_read_reg(tx_ring
, HNS3_RING_TX_RING_OFFSET_REG
),
2833 hns3_tqp_read_reg(tx_ring
, HNS3_RING_TX_RING_EBDNUM_REG
),
2834 hns3_tqp_read_reg(tx_ring
,
2835 HNS3_RING_TX_RING_EBD_OFFSET_REG
));
2838 static bool hns3_get_tx_timeo_queue_info(struct net_device
*ndev
)
2840 struct hns3_nic_priv
*priv
= netdev_priv(ndev
);
2841 struct hnae3_handle
*h
= hns3_get_handle(ndev
);
2842 struct hns3_enet_ring
*tx_ring
;
2845 timeout_queue
= hns3_get_timeout_queue(ndev
);
2846 if (timeout_queue
>= ndev
->num_tx_queues
) {
2848 "no netdev TX timeout queue found, timeout count: %llu\n",
2849 priv
->tx_timeout_count
);
2853 priv
->tx_timeout_count
++;
2855 tx_ring
= &priv
->ring
[timeout_queue
];
2856 hns3_dump_queue_stats(ndev
, tx_ring
, timeout_queue
);
2858 /* When mac received many pause frames continuous, it's unable to send
2859 * packets, which may cause tx timeout
2861 if (h
->ae_algo
->ops
->get_mac_stats
) {
2862 struct hns3_mac_stats mac_stats
;
2864 h
->ae_algo
->ops
->get_mac_stats(h
, &mac_stats
);
2865 netdev_info(ndev
, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
2866 mac_stats
.tx_pause_cnt
, mac_stats
.rx_pause_cnt
);
2869 hns3_dump_queue_reg(ndev
, tx_ring
);
2874 static void hns3_nic_net_timeout(struct net_device
*ndev
, unsigned int txqueue
)
2876 struct hns3_nic_priv
*priv
= netdev_priv(ndev
);
2877 struct hnae3_handle
*h
= priv
->ae_handle
;
2879 if (!hns3_get_tx_timeo_queue_info(ndev
))
2882 /* request the reset, and let the hclge to determine
2883 * which reset level should be done
2885 if (h
->ae_algo
->ops
->reset_event
)
2886 h
->ae_algo
->ops
->reset_event(h
->pdev
, h
);
2889 #ifdef CONFIG_RFS_ACCEL
2890 static int hns3_rx_flow_steer(struct net_device
*dev
, const struct sk_buff
*skb
,
2891 u16 rxq_index
, u32 flow_id
)
2893 struct hnae3_handle
*h
= hns3_get_handle(dev
);
2894 struct flow_keys fkeys
;
2896 if (!h
->ae_algo
->ops
->add_arfs_entry
)
2899 if (skb
->encapsulation
)
2900 return -EPROTONOSUPPORT
;
2902 if (!skb_flow_dissect_flow_keys(skb
, &fkeys
, 0))
2903 return -EPROTONOSUPPORT
;
2905 if ((fkeys
.basic
.n_proto
!= htons(ETH_P_IP
) &&
2906 fkeys
.basic
.n_proto
!= htons(ETH_P_IPV6
)) ||
2907 (fkeys
.basic
.ip_proto
!= IPPROTO_TCP
&&
2908 fkeys
.basic
.ip_proto
!= IPPROTO_UDP
))
2909 return -EPROTONOSUPPORT
;
2911 return h
->ae_algo
->ops
->add_arfs_entry(h
, rxq_index
, flow_id
, &fkeys
);
2915 static int hns3_nic_get_vf_config(struct net_device
*ndev
, int vf
,
2916 struct ifla_vf_info
*ivf
)
2918 struct hnae3_handle
*h
= hns3_get_handle(ndev
);
2920 if (!h
->ae_algo
->ops
->get_vf_config
)
2923 return h
->ae_algo
->ops
->get_vf_config(h
, vf
, ivf
);
2926 static int hns3_nic_set_vf_link_state(struct net_device
*ndev
, int vf
,
2929 struct hnae3_handle
*h
= hns3_get_handle(ndev
);
2931 if (!h
->ae_algo
->ops
->set_vf_link_state
)
2934 return h
->ae_algo
->ops
->set_vf_link_state(h
, vf
, link_state
);
2937 static int hns3_nic_set_vf_rate(struct net_device
*ndev
, int vf
,
2938 int min_tx_rate
, int max_tx_rate
)
2940 struct hnae3_handle
*h
= hns3_get_handle(ndev
);
2942 if (!h
->ae_algo
->ops
->set_vf_rate
)
2945 return h
->ae_algo
->ops
->set_vf_rate(h
, vf
, min_tx_rate
, max_tx_rate
,
2949 static int hns3_nic_set_vf_mac(struct net_device
*netdev
, int vf_id
, u8
*mac
)
2951 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
2952 char format_mac_addr
[HNAE3_FORMAT_MAC_ADDR_LEN
];
2954 if (!h
->ae_algo
->ops
->set_vf_mac
)
2957 if (is_multicast_ether_addr(mac
)) {
2958 hnae3_format_mac_addr(format_mac_addr
, mac
);
2960 "Invalid MAC:%s specified. Could not set MAC\n",
2965 return h
->ae_algo
->ops
->set_vf_mac(h
, vf_id
, mac
);
2968 #define HNS3_INVALID_DSCP 0xff
2969 #define HNS3_DSCP_SHIFT 2
2971 static u8
hns3_get_skb_dscp(struct sk_buff
*skb
)
2973 __be16 protocol
= skb
->protocol
;
2974 u8 dscp
= HNS3_INVALID_DSCP
;
2976 if (protocol
== htons(ETH_P_8021Q
))
2977 protocol
= vlan_get_protocol(skb
);
2979 if (protocol
== htons(ETH_P_IP
))
2980 dscp
= ipv4_get_dsfield(ip_hdr(skb
)) >> HNS3_DSCP_SHIFT
;
2981 else if (protocol
== htons(ETH_P_IPV6
))
2982 dscp
= ipv6_get_dsfield(ipv6_hdr(skb
)) >> HNS3_DSCP_SHIFT
;
2987 static u16
hns3_nic_select_queue(struct net_device
*netdev
,
2988 struct sk_buff
*skb
,
2989 struct net_device
*sb_dev
)
2991 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
2994 if (h
->kinfo
.tc_map_mode
!= HNAE3_TC_MAP_MODE_DSCP
||
2995 !h
->ae_algo
->ops
->get_dscp_prio
)
2998 dscp
= hns3_get_skb_dscp(skb
);
2999 if (unlikely(dscp
>= HNAE3_MAX_DSCP
))
3002 skb
->priority
= h
->kinfo
.dscp_prio
[dscp
];
3003 if (skb
->priority
== HNAE3_PRIO_ID_INVALID
)
3007 return netdev_pick_tx(netdev
, skb
, sb_dev
);
static const struct net_device_ops hns3_nic_netdev_ops = {
	.ndo_open		= hns3_nic_net_open,
	.ndo_stop		= hns3_nic_net_stop,
	.ndo_start_xmit		= hns3_nic_net_xmit,
	.ndo_tx_timeout		= hns3_nic_net_timeout,
	.ndo_set_mac_address	= hns3_nic_net_set_mac_address,
	.ndo_eth_ioctl		= hns3_nic_do_ioctl,
	.ndo_change_mtu		= hns3_nic_change_mtu,
	.ndo_set_features	= hns3_nic_set_features,
	.ndo_features_check	= hns3_features_check,
	.ndo_get_stats64	= hns3_nic_get_stats64,
	.ndo_setup_tc		= hns3_nic_setup_tc,
	.ndo_set_rx_mode	= hns3_nic_set_rx_mode,
	.ndo_vlan_rx_add_vid	= hns3_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= hns3_vlan_rx_kill_vid,
	.ndo_set_vf_vlan	= hns3_ndo_set_vf_vlan,
	.ndo_set_vf_spoofchk	= hns3_set_vf_spoofchk,
	.ndo_set_vf_trust	= hns3_set_vf_trust,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= hns3_rx_flow_steer,
#endif
	.ndo_get_vf_config	= hns3_nic_get_vf_config,
	.ndo_set_vf_link_state	= hns3_nic_set_vf_link_state,
	.ndo_set_vf_rate	= hns3_nic_set_vf_rate,
	.ndo_set_vf_mac		= hns3_nic_set_vf_mac,
	.ndo_select_queue	= hns3_nic_select_queue,
};
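/* Distinguish PF from VF devices by PCI device ID; unknown IDs are
 * reported and treated as non-physical functions.
 */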
bool hns3_is_phys_func(struct pci_dev *pdev)
{
	u32 dev_id = pdev->device;

	switch (dev_id) {
	case HNAE3_DEV_ID_GE:
	case HNAE3_DEV_ID_25GE:
	case HNAE3_DEV_ID_25GE_RDMA:
	case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
	case HNAE3_DEV_ID_50GE_RDMA:
	case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
	case HNAE3_DEV_ID_100G_RDMA_MACSEC:
	case HNAE3_DEV_ID_200G_RDMA:
		return true;
	case HNAE3_DEV_ID_VF:
	case HNAE3_DEV_ID_RDMA_DCB_PFC_VF:
		return false;
	default:
		dev_warn(&pdev->dev, "un-recognized pci device-id %u",
			 dev_id);
	}

	return false;
}
3063 static void hns3_disable_sriov(struct pci_dev
*pdev
)
3065 /* If our VFs are assigned we cannot shut down SR-IOV
3066 * without causing issues, so just leave the hardware
3067 * available but disabled
3069 if (pci_vfs_assigned(pdev
)) {
3070 dev_warn(&pdev
->dev
,
3071 "disabling driver while VFs are assigned\n");
3075 pci_disable_sriov(pdev
);
/* hns3_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in hns3_pci_tbl
 *
 * hns3_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 */
static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct hnae3_ae_dev *ae_dev;
	int ret;

	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
	if (!ae_dev)
		return -ENOMEM;

	ae_dev->pdev = pdev;
	ae_dev->flag = ent->driver_data;
	pci_set_drvdata(pdev, ae_dev);

	ret = hnae3_register_ae_dev(ae_dev);
	if (ret)
		pci_set_drvdata(pdev, NULL);
3109 * hns3_clean_vf_config
3110 * @pdev: pointer to a pci_dev structure
3111 * @num_vfs: number of VFs allocated
3113 * Clean residual vf config after disable sriov
3115 static void hns3_clean_vf_config(struct pci_dev
*pdev
, int num_vfs
)
3117 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
3119 if (ae_dev
->ops
->clean_vf_config
)
3120 ae_dev
->ops
->clean_vf_config(ae_dev
, num_vfs
);
3123 /* hns3_remove - Device removal routine
3124 * @pdev: PCI device information struct
3126 static void hns3_remove(struct pci_dev
*pdev
)
3128 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
3130 if (hns3_is_phys_func(pdev
) && IS_ENABLED(CONFIG_PCI_IOV
))
3131 hns3_disable_sriov(pdev
);
3133 hnae3_unregister_ae_dev(ae_dev
);
3134 pci_set_drvdata(pdev
, NULL
);
3138 * hns3_pci_sriov_configure
3139 * @pdev: pointer to a pci_dev structure
3140 * @num_vfs: number of VFs to allocate
3142 * Enable or change the number of VFs. Called when the user updates the number
3145 static int hns3_pci_sriov_configure(struct pci_dev
*pdev
, int num_vfs
)
3149 if (!(hns3_is_phys_func(pdev
) && IS_ENABLED(CONFIG_PCI_IOV
))) {
3150 dev_warn(&pdev
->dev
, "Can not config SRIOV\n");
3155 ret
= pci_enable_sriov(pdev
, num_vfs
);
3157 dev_err(&pdev
->dev
, "SRIOV enable failed %d\n", ret
);
3160 } else if (!pci_vfs_assigned(pdev
)) {
3161 int num_vfs_pre
= pci_num_vf(pdev
);
3163 pci_disable_sriov(pdev
);
3164 hns3_clean_vf_config(pdev
, num_vfs_pre
);
3166 dev_warn(&pdev
->dev
,
3167 "Unable to free VFs because some are assigned to VMs.\n");
3173 static void hns3_shutdown(struct pci_dev
*pdev
)
3175 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
3177 hnae3_unregister_ae_dev(ae_dev
);
3178 pci_set_drvdata(pdev
, NULL
);
3180 if (system_state
== SYSTEM_POWER_OFF
)
3181 pci_set_power_state(pdev
, PCI_D3hot
);
3184 static int __maybe_unused
hns3_suspend(struct device
*dev
)
3186 struct hnae3_ae_dev
*ae_dev
= dev_get_drvdata(dev
);
3188 if (ae_dev
&& hns3_is_phys_func(ae_dev
->pdev
)) {
3189 dev_info(dev
, "Begin to suspend.\n");
3190 if (ae_dev
->ops
&& ae_dev
->ops
->reset_prepare
)
3191 ae_dev
->ops
->reset_prepare(ae_dev
, HNAE3_FUNC_RESET
);
3197 static int __maybe_unused
hns3_resume(struct device
*dev
)
3199 struct hnae3_ae_dev
*ae_dev
= dev_get_drvdata(dev
);
3201 if (ae_dev
&& hns3_is_phys_func(ae_dev
->pdev
)) {
3202 dev_info(dev
, "Begin to resume.\n");
3203 if (ae_dev
->ops
&& ae_dev
->ops
->reset_done
)
3204 ae_dev
->ops
->reset_done(ae_dev
);
3210 static pci_ers_result_t
hns3_error_detected(struct pci_dev
*pdev
,
3211 pci_channel_state_t state
)
3213 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
3214 pci_ers_result_t ret
;
3216 dev_info(&pdev
->dev
, "PCI error detected, state(=%u)!!\n", state
);
3218 if (state
== pci_channel_io_perm_failure
)
3219 return PCI_ERS_RESULT_DISCONNECT
;
3221 if (!ae_dev
|| !ae_dev
->ops
) {
3223 "Can't recover - error happened before device initialized\n");
3224 return PCI_ERS_RESULT_NONE
;
3227 if (ae_dev
->ops
->handle_hw_ras_error
)
3228 ret
= ae_dev
->ops
->handle_hw_ras_error(ae_dev
);
3230 return PCI_ERS_RESULT_NONE
;
3235 static pci_ers_result_t
hns3_slot_reset(struct pci_dev
*pdev
)
3237 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
3238 const struct hnae3_ae_ops
*ops
;
3239 enum hnae3_reset_type reset_type
;
3240 struct device
*dev
= &pdev
->dev
;
3242 if (!ae_dev
|| !ae_dev
->ops
)
3243 return PCI_ERS_RESULT_NONE
;
3246 /* request the reset */
3247 if (ops
->reset_event
&& ops
->get_reset_level
&&
3248 ops
->set_default_reset_request
) {
3249 if (ae_dev
->hw_err_reset_req
) {
3250 reset_type
= ops
->get_reset_level(ae_dev
,
3251 &ae_dev
->hw_err_reset_req
);
3252 ops
->set_default_reset_request(ae_dev
, reset_type
);
3253 dev_info(dev
, "requesting reset due to PCI error\n");
3254 ops
->reset_event(pdev
, NULL
);
3257 return PCI_ERS_RESULT_RECOVERED
;
3260 return PCI_ERS_RESULT_DISCONNECT
;
3263 static void hns3_reset_prepare(struct pci_dev
*pdev
)
3265 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
3267 dev_info(&pdev
->dev
, "FLR prepare\n");
3268 if (ae_dev
&& ae_dev
->ops
&& ae_dev
->ops
->reset_prepare
)
3269 ae_dev
->ops
->reset_prepare(ae_dev
, HNAE3_FLR_RESET
);
3272 static void hns3_reset_done(struct pci_dev
*pdev
)
3274 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
3276 dev_info(&pdev
->dev
, "FLR done\n");
3277 if (ae_dev
&& ae_dev
->ops
&& ae_dev
->ops
->reset_done
)
3278 ae_dev
->ops
->reset_done(ae_dev
);
static const struct pci_error_handlers hns3_err_handler = {
	.error_detected = hns3_error_detected,
	.slot_reset     = hns3_slot_reset,
	.reset_prepare  = hns3_reset_prepare,
	.reset_done     = hns3_reset_done,
};

static SIMPLE_DEV_PM_OPS(hns3_pm_ops, hns3_suspend, hns3_resume);

static struct pci_driver hns3_driver = {
	.name     = hns3_driver_name,
	.id_table = hns3_pci_tbl,
	.probe    = hns3_probe,
	.remove   = hns3_remove,
	.shutdown = hns3_shutdown,
	.driver.pm  = &hns3_pm_ops,
	.sriov_configure = hns3_pci_sriov_configure,
	.err_handler    = &hns3_err_handler,
};
3301 /* set default feature to hns3 */
3302 static void hns3_set_default_feature(struct net_device
*netdev
)
3304 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
3305 struct pci_dev
*pdev
= h
->pdev
;
3306 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
3308 netdev
->priv_flags
|= IFF_UNICAST_FLT
;
3310 netdev
->gso_partial_features
|= NETIF_F_GSO_GRE_CSUM
;
3312 netdev
->features
|= NETIF_F_HW_VLAN_CTAG_FILTER
|
3313 NETIF_F_HW_VLAN_CTAG_TX
| NETIF_F_HW_VLAN_CTAG_RX
|
3314 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
3315 NETIF_F_GRO
| NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
3316 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
3317 NETIF_F_SCTP_CRC
| NETIF_F_FRAGLIST
;
3319 if (hnae3_ae_dev_gro_supported(ae_dev
))
3320 netdev
->features
|= NETIF_F_GRO_HW
;
3322 if (hnae3_ae_dev_fd_supported(ae_dev
))
3323 netdev
->features
|= NETIF_F_NTUPLE
;
3325 if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B
, ae_dev
->caps
))
3326 netdev
->features
|= NETIF_F_GSO_UDP_L4
;
3328 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B
, ae_dev
->caps
))
3329 netdev
->features
|= NETIF_F_HW_CSUM
;
3331 netdev
->features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
;
3333 if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B
, ae_dev
->caps
))
3334 netdev
->features
|= NETIF_F_GSO_UDP_TUNNEL_CSUM
;
3336 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B
, ae_dev
->caps
))
3337 netdev
->features
|= NETIF_F_HW_TC
;
3339 netdev
->hw_features
|= netdev
->features
;
3340 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B
, ae_dev
->caps
))
3341 netdev
->hw_features
&= ~NETIF_F_HW_VLAN_CTAG_FILTER
;
3343 netdev
->vlan_features
|= netdev
->features
&
3344 ~(NETIF_F_HW_VLAN_CTAG_FILTER
| NETIF_F_HW_VLAN_CTAG_TX
|
3345 NETIF_F_HW_VLAN_CTAG_RX
| NETIF_F_GRO_HW
| NETIF_F_NTUPLE
|
3348 netdev
->hw_enc_features
|= netdev
->vlan_features
| NETIF_F_TSO_MANGLEID
;
3351 static int hns3_alloc_buffer(struct hns3_enet_ring
*ring
,
3352 struct hns3_desc_cb
*cb
)
3354 unsigned int order
= hns3_page_order(ring
);
3357 if (ring
->page_pool
) {
3358 p
= page_pool_dev_alloc_frag(ring
->page_pool
,
3360 hns3_buf_size(ring
));
3365 cb
->buf
= page_address(p
);
3366 cb
->dma
= page_pool_get_dma_addr(p
);
3367 cb
->type
= DESC_TYPE_PP_FRAG
;
3372 p
= dev_alloc_pages(order
);
3377 cb
->page_offset
= 0;
3379 cb
->buf
= page_address(p
);
3380 cb
->length
= hns3_page_size(ring
);
3381 cb
->type
= DESC_TYPE_PAGE
;
3382 page_ref_add(p
, USHRT_MAX
- 1);
3383 cb
->pagecnt_bias
= USHRT_MAX
;
3388 static void hns3_free_buffer(struct hns3_enet_ring
*ring
,
3389 struct hns3_desc_cb
*cb
, int budget
)
3391 if (cb
->type
& (DESC_TYPE_SKB
| DESC_TYPE_BOUNCE_HEAD
|
3392 DESC_TYPE_BOUNCE_ALL
| DESC_TYPE_SGL_SKB
))
3393 napi_consume_skb(cb
->priv
, budget
);
3394 else if (!HNAE3_IS_TX_RING(ring
)) {
3395 if (cb
->type
& DESC_TYPE_PAGE
&& cb
->pagecnt_bias
)
3396 __page_frag_cache_drain(cb
->priv
, cb
->pagecnt_bias
);
3397 else if (cb
->type
& DESC_TYPE_PP_FRAG
)
3398 page_pool_put_full_page(ring
->page_pool
, cb
->priv
,
3401 memset(cb
, 0, sizeof(*cb
));
3404 static int hns3_map_buffer(struct hns3_enet_ring
*ring
, struct hns3_desc_cb
*cb
)
3406 cb
->dma
= dma_map_page(ring_to_dev(ring
), cb
->priv
, 0,
3407 cb
->length
, ring_to_dma_dir(ring
));
3409 if (unlikely(dma_mapping_error(ring_to_dev(ring
), cb
->dma
)))
3415 static void hns3_unmap_buffer(struct hns3_enet_ring
*ring
,
3416 struct hns3_desc_cb
*cb
)
3418 if (cb
->type
& (DESC_TYPE_SKB
| DESC_TYPE_FRAGLIST_SKB
))
3419 dma_unmap_single(ring_to_dev(ring
), cb
->dma
, cb
->length
,
3420 ring_to_dma_dir(ring
));
3421 else if ((cb
->type
& DESC_TYPE_PAGE
) && cb
->length
)
3422 dma_unmap_page(ring_to_dev(ring
), cb
->dma
, cb
->length
,
3423 ring_to_dma_dir(ring
));
3424 else if (cb
->type
& (DESC_TYPE_BOUNCE_ALL
| DESC_TYPE_BOUNCE_HEAD
|
3426 hns3_tx_spare_reclaim_cb(ring
, cb
);
3429 static void hns3_buffer_detach(struct hns3_enet_ring
*ring
, int i
)
3431 hns3_unmap_buffer(ring
, &ring
->desc_cb
[i
]);
3432 ring
->desc
[i
].addr
= 0;
3433 ring
->desc_cb
[i
].refill
= 0;
3436 static void hns3_free_buffer_detach(struct hns3_enet_ring
*ring
, int i
,
3439 struct hns3_desc_cb
*cb
= &ring
->desc_cb
[i
];
3441 if (!ring
->desc_cb
[i
].dma
)
3444 hns3_buffer_detach(ring
, i
);
3445 hns3_free_buffer(ring
, cb
, budget
);
3448 static void hns3_free_buffers(struct hns3_enet_ring
*ring
)
3452 for (i
= 0; i
< ring
->desc_num
; i
++)
3453 hns3_free_buffer_detach(ring
, i
, 0);
3456 /* free desc along with its attached buffer */
3457 static void hns3_free_desc(struct hns3_enet_ring
*ring
)
3459 int size
= ring
->desc_num
* sizeof(ring
->desc
[0]);
3461 hns3_free_buffers(ring
);
3464 dma_free_coherent(ring_to_dev(ring
), size
,
3465 ring
->desc
, ring
->desc_dma_addr
);
3470 static int hns3_alloc_desc(struct hns3_enet_ring
*ring
)
3472 int size
= ring
->desc_num
* sizeof(ring
->desc
[0]);
3474 ring
->desc
= dma_alloc_coherent(ring_to_dev(ring
), size
,
3475 &ring
->desc_dma_addr
, GFP_KERNEL
);
3482 static int hns3_alloc_and_map_buffer(struct hns3_enet_ring
*ring
,
3483 struct hns3_desc_cb
*cb
)
3487 ret
= hns3_alloc_buffer(ring
, cb
);
3488 if (ret
|| ring
->page_pool
)
3491 ret
= hns3_map_buffer(ring
, cb
);
3498 hns3_free_buffer(ring
, cb
, 0);
3503 static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring
*ring
, int i
)
3505 int ret
= hns3_alloc_and_map_buffer(ring
, &ring
->desc_cb
[i
]);
3510 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
+
3511 ring
->desc_cb
[i
].page_offset
);
3512 ring
->desc_cb
[i
].refill
= 1;
3517 /* Allocate memory for raw pkg, and map with dma */
3518 static int hns3_alloc_ring_buffers(struct hns3_enet_ring
*ring
)
3522 for (i
= 0; i
< ring
->desc_num
; i
++) {
3523 ret
= hns3_alloc_and_attach_buffer(ring
, i
);
3525 goto out_buffer_fail
;
3531 for (j
= i
- 1; j
>= 0; j
--)
3532 hns3_free_buffer_detach(ring
, j
, 0);
3536 /* detach a in-used buffer and replace with a reserved one */
3537 static void hns3_replace_buffer(struct hns3_enet_ring
*ring
, int i
,
3538 struct hns3_desc_cb
*res_cb
)
3540 hns3_unmap_buffer(ring
, &ring
->desc_cb
[i
]);
3541 ring
->desc_cb
[i
] = *res_cb
;
3542 ring
->desc_cb
[i
].refill
= 1;
3543 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
+
3544 ring
->desc_cb
[i
].page_offset
);
3545 ring
->desc
[i
].rx
.bd_base_info
= 0;
3548 static void hns3_reuse_buffer(struct hns3_enet_ring
*ring
, int i
)
3550 ring
->desc_cb
[i
].reuse_flag
= 0;
3551 ring
->desc_cb
[i
].refill
= 1;
3552 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
+
3553 ring
->desc_cb
[i
].page_offset
);
3554 ring
->desc
[i
].rx
.bd_base_info
= 0;
3556 dma_sync_single_for_device(ring_to_dev(ring
),
3557 ring
->desc_cb
[i
].dma
+ ring
->desc_cb
[i
].page_offset
,
3558 hns3_buf_size(ring
),
3562 static bool hns3_nic_reclaim_desc(struct hns3_enet_ring
*ring
,
3563 int *bytes
, int *pkts
, int budget
)
3565 /* pair with ring->last_to_use update in hns3_tx_doorbell(),
3566 * smp_store_release() is not used in hns3_tx_doorbell() because
3567 * the doorbell operation already have the needed barrier operation.
3569 int ltu
= smp_load_acquire(&ring
->last_to_use
);
3570 int ntc
= ring
->next_to_clean
;
3571 struct hns3_desc_cb
*desc_cb
;
3572 bool reclaimed
= false;
3573 struct hns3_desc
*desc
;
3575 while (ltu
!= ntc
) {
3576 desc
= &ring
->desc
[ntc
];
3578 if (le16_to_cpu(desc
->tx
.bdtp_fe_sc_vld_ra_ri
) &
3579 BIT(HNS3_TXD_VLD_B
))
3582 desc_cb
= &ring
->desc_cb
[ntc
];
3584 if (desc_cb
->type
& (DESC_TYPE_SKB
| DESC_TYPE_BOUNCE_ALL
|
3585 DESC_TYPE_BOUNCE_HEAD
|
3586 DESC_TYPE_SGL_SKB
)) {
3588 (*bytes
) += desc_cb
->send_bytes
;
3591 /* desc_cb will be cleaned, after hnae3_free_buffer_detach */
3592 hns3_free_buffer_detach(ring
, ntc
, budget
);
3594 if (++ntc
== ring
->desc_num
)
3597 /* Issue prefetch for next Tx descriptor */
3598 prefetch(&ring
->desc_cb
[ntc
]);
3602 if (unlikely(!reclaimed
))
3605 /* This smp_store_release() pairs with smp_load_acquire() in
3606 * ring_space called by hns3_nic_net_xmit.
3608 smp_store_release(&ring
->next_to_clean
, ntc
);
3610 hns3_tx_spare_update(ring
);
3615 void hns3_clean_tx_ring(struct hns3_enet_ring
*ring
, int budget
)
3617 struct net_device
*netdev
= ring_to_netdev(ring
);
3618 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
3619 struct netdev_queue
*dev_queue
;
3625 if (unlikely(!hns3_nic_reclaim_desc(ring
, &bytes
, &pkts
, budget
)))
3628 ring
->tqp_vector
->tx_group
.total_bytes
+= bytes
;
3629 ring
->tqp_vector
->tx_group
.total_packets
+= pkts
;
3631 u64_stats_update_begin(&ring
->syncp
);
3632 ring
->stats
.tx_bytes
+= bytes
;
3633 ring
->stats
.tx_pkts
+= pkts
;
3634 u64_stats_update_end(&ring
->syncp
);
3636 dev_queue
= netdev_get_tx_queue(netdev
, ring
->tqp
->tqp_index
);
3637 netdev_tx_completed_queue(dev_queue
, pkts
, bytes
);
3639 if (unlikely(netif_carrier_ok(netdev
) &&
3640 ring_space(ring
) > HNS3_MAX_TSO_BD_NUM
)) {
3641 /* Make sure that anybody stopping the queue after this
3642 * sees the new next_to_clean.
3645 if (netif_tx_queue_stopped(dev_queue
) &&
3646 !test_bit(HNS3_NIC_STATE_DOWN
, &priv
->state
)) {
3647 netif_tx_wake_queue(dev_queue
);
3648 ring
->stats
.restart_queue
++;
static int hns3_desc_unused(struct hns3_enet_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill))
		return ring->desc_num;

	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
3664 /* Return true if there is any allocation failure */
3665 static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring
*ring
,
3668 struct hns3_desc_cb
*desc_cb
;
3669 struct hns3_desc_cb res_cbs
;
3672 for (i
= 0; i
< cleand_count
; i
++) {
3673 desc_cb
= &ring
->desc_cb
[ring
->next_to_use
];
3674 if (desc_cb
->reuse_flag
) {
3675 hns3_ring_stats_update(ring
, reuse_pg_cnt
);
3677 hns3_reuse_buffer(ring
, ring
->next_to_use
);
3679 ret
= hns3_alloc_and_map_buffer(ring
, &res_cbs
);
3681 hns3_ring_stats_update(ring
, sw_err_cnt
);
3683 hns3_rl_err(ring_to_netdev(ring
),
3684 "alloc rx buffer failed: %d\n",
3687 writel(i
, ring
->tqp
->io_base
+
3688 HNS3_RING_RX_RING_HEAD_REG
);
3691 hns3_replace_buffer(ring
, ring
->next_to_use
, &res_cbs
);
3693 hns3_ring_stats_update(ring
, non_reuse_pg
);
3696 ring_ptr_move_fw(ring
, next_to_use
);
3699 writel(i
, ring
->tqp
->io_base
+ HNS3_RING_RX_RING_HEAD_REG
);
static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
{
	return page_count(cb->priv) == cb->pagecnt_bias;
}
3708 static int hns3_handle_rx_copybreak(struct sk_buff
*skb
, int i
,
3709 struct hns3_enet_ring
*ring
,
3711 struct hns3_desc_cb
*desc_cb
)
3713 struct hns3_desc
*desc
= &ring
->desc
[ring
->next_to_clean
];
3714 u32 frag_offset
= desc_cb
->page_offset
+ pull_len
;
3715 int size
= le16_to_cpu(desc
->rx
.size
);
3716 u32 frag_size
= size
- pull_len
;
3717 void *frag
= napi_alloc_frag(frag_size
);
3719 if (unlikely(!frag
)) {
3720 hns3_ring_stats_update(ring
, frag_alloc_err
);
3722 hns3_rl_err(ring_to_netdev(ring
),
3723 "failed to allocate rx frag\n");
3727 desc_cb
->reuse_flag
= 1;
3728 memcpy(frag
, desc_cb
->buf
+ frag_offset
, frag_size
);
3729 skb_add_rx_frag(skb
, i
, virt_to_page(frag
),
3730 offset_in_page(frag
), frag_size
, frag_size
);
3732 hns3_ring_stats_update(ring
, frag_alloc
);
3736 static void hns3_nic_reuse_page(struct sk_buff
*skb
, int i
,
3737 struct hns3_enet_ring
*ring
, int pull_len
,
3738 struct hns3_desc_cb
*desc_cb
)
3740 struct hns3_desc
*desc
= &ring
->desc
[ring
->next_to_clean
];
3741 u32 frag_offset
= desc_cb
->page_offset
+ pull_len
;
3742 int size
= le16_to_cpu(desc
->rx
.size
);
3743 u32 truesize
= hns3_buf_size(ring
);
3744 u32 frag_size
= size
- pull_len
;
3748 if (ring
->page_pool
) {
3749 skb_add_rx_frag(skb
, i
, desc_cb
->priv
, frag_offset
,
3750 frag_size
, truesize
);
3754 /* Avoid re-using remote or pfmem page */
3755 if (unlikely(!dev_page_is_reusable(desc_cb
->priv
)))
3758 reused
= hns3_can_reuse_page(desc_cb
);
3760 /* Rx page can be reused when:
3761 * 1. Rx page is only owned by the driver when page_offset
3762 * is zero, which means 0 @ truesize will be used by
3763 * stack after skb_add_rx_frag() is called, and the rest
3764 * of rx page can be reused by driver.
3766 * 2. Rx page is only owned by the driver when page_offset
3767 * is non-zero, which means page_offset @ truesize will
3768 * be used by stack after skb_add_rx_frag() is called,
3769 * and 0 @ truesize can be reused by driver.
3771 if ((!desc_cb
->page_offset
&& reused
) ||
3772 ((desc_cb
->page_offset
+ truesize
+ truesize
) <=
3773 hns3_page_size(ring
) && desc_cb
->page_offset
)) {
3774 desc_cb
->page_offset
+= truesize
;
3775 desc_cb
->reuse_flag
= 1;
3776 } else if (desc_cb
->page_offset
&& reused
) {
3777 desc_cb
->page_offset
= 0;
3778 desc_cb
->reuse_flag
= 1;
3779 } else if (frag_size
<= ring
->rx_copybreak
) {
3780 ret
= hns3_handle_rx_copybreak(skb
, i
, ring
, pull_len
, desc_cb
);
3786 desc_cb
->pagecnt_bias
--;
3788 if (unlikely(!desc_cb
->pagecnt_bias
)) {
3789 page_ref_add(desc_cb
->priv
, USHRT_MAX
);
3790 desc_cb
->pagecnt_bias
= USHRT_MAX
;
3793 skb_add_rx_frag(skb
, i
, desc_cb
->priv
, frag_offset
,
3794 frag_size
, truesize
);
3796 if (unlikely(!desc_cb
->reuse_flag
))
3797 __page_frag_cache_drain(desc_cb
->priv
, desc_cb
->pagecnt_bias
);
3800 static int hns3_gro_complete(struct sk_buff
*skb
, u32 l234info
)
3802 __be16 type
= skb
->protocol
;
3806 while (eth_type_vlan(type
)) {
3807 struct vlan_hdr
*vh
;
3809 if ((depth
+ VLAN_HLEN
) > skb_headlen(skb
))
3812 vh
= (struct vlan_hdr
*)(skb
->data
+ depth
);
3813 type
= vh
->h_vlan_encapsulated_proto
;
3817 skb_set_network_header(skb
, depth
);
3819 if (type
== htons(ETH_P_IP
)) {
3820 const struct iphdr
*iph
= ip_hdr(skb
);
3822 depth
+= sizeof(struct iphdr
);
3823 skb_set_transport_header(skb
, depth
);
3825 th
->check
= ~tcp_v4_check(skb
->len
- depth
, iph
->saddr
,
3827 } else if (type
== htons(ETH_P_IPV6
)) {
3828 const struct ipv6hdr
*iph
= ipv6_hdr(skb
);
3830 depth
+= sizeof(struct ipv6hdr
);
3831 skb_set_transport_header(skb
, depth
);
3833 th
->check
= ~tcp_v6_check(skb
->len
- depth
, &iph
->saddr
,
3836 hns3_rl_err(skb
->dev
,
3837 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
3838 be16_to_cpu(type
), depth
);
3842 skb_shinfo(skb
)->gso_segs
= NAPI_GRO_CB(skb
)->count
;
3844 skb_shinfo(skb
)->gso_type
|= SKB_GSO_TCP_ECN
;
3846 if (l234info
& BIT(HNS3_RXD_GRO_FIXID_B
))
3847 skb_shinfo(skb
)->gso_type
|= SKB_GSO_TCP_FIXEDID
;
3849 skb
->csum_start
= (unsigned char *)th
- skb
->head
;
3850 skb
->csum_offset
= offsetof(struct tcphdr
, check
);
3851 skb
->ip_summed
= CHECKSUM_PARTIAL
;
3853 trace_hns3_gro(skb
);
3858 static void hns3_checksum_complete(struct hns3_enet_ring
*ring
,
3859 struct sk_buff
*skb
, u32 ptype
, u16 csum
)
3861 if (ptype
== HNS3_INVALID_PTYPE
||
3862 hns3_rx_ptype_tbl
[ptype
].ip_summed
!= CHECKSUM_COMPLETE
)
3865 hns3_ring_stats_update(ring
, csum_complete
);
3866 skb
->ip_summed
= CHECKSUM_COMPLETE
;
3867 skb
->csum
= csum_unfold((__force __sum16
)csum
);
3870 static void hns3_rx_handle_csum(struct sk_buff
*skb
, u32 l234info
,
3871 u32 ol_info
, u32 ptype
)
3873 int l3_type
, l4_type
;
3876 if (ptype
!= HNS3_INVALID_PTYPE
) {
3877 skb
->csum_level
= hns3_rx_ptype_tbl
[ptype
].csum_level
;
3878 skb
->ip_summed
= hns3_rx_ptype_tbl
[ptype
].ip_summed
;
3883 ol4_type
= hnae3_get_field(ol_info
, HNS3_RXD_OL4ID_M
,
3886 case HNS3_OL4_TYPE_MAC_IN_UDP
:
3887 case HNS3_OL4_TYPE_NVGRE
:
3888 skb
->csum_level
= 1;
3890 case HNS3_OL4_TYPE_NO_TUN
:
3891 l3_type
= hnae3_get_field(l234info
, HNS3_RXD_L3ID_M
,
3893 l4_type
= hnae3_get_field(l234info
, HNS3_RXD_L4ID_M
,
3895 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
3896 if ((l3_type
== HNS3_L3_TYPE_IPV4
||
3897 l3_type
== HNS3_L3_TYPE_IPV6
) &&
3898 (l4_type
== HNS3_L4_TYPE_UDP
||
3899 l4_type
== HNS3_L4_TYPE_TCP
||
3900 l4_type
== HNS3_L4_TYPE_SCTP
))
3901 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
3908 static void hns3_rx_checksum(struct hns3_enet_ring
*ring
, struct sk_buff
*skb
,
3909 u32 l234info
, u32 bd_base_info
, u32 ol_info
,
3912 struct net_device
*netdev
= ring_to_netdev(ring
);
3913 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
3914 u32 ptype
= HNS3_INVALID_PTYPE
;
3916 skb
->ip_summed
= CHECKSUM_NONE
;
3918 skb_checksum_none_assert(skb
);
3920 if (!(netdev
->features
& NETIF_F_RXCSUM
))
3923 if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE
, &priv
->state
))
3924 ptype
= hnae3_get_field(ol_info
, HNS3_RXD_PTYPE_M
,
3927 hns3_checksum_complete(ring
, skb
, ptype
, csum
);
3929 /* check if hardware has done checksum */
3930 if (!(bd_base_info
& BIT(HNS3_RXD_L3L4P_B
)))
3933 if (unlikely(l234info
& (BIT(HNS3_RXD_L3E_B
) | BIT(HNS3_RXD_L4E_B
) |
3934 BIT(HNS3_RXD_OL3E_B
) |
3935 BIT(HNS3_RXD_OL4E_B
)))) {
3936 skb
->ip_summed
= CHECKSUM_NONE
;
3937 hns3_ring_stats_update(ring
, l3l4_csum_err
);
3942 hns3_rx_handle_csum(skb
, l234info
, ol_info
, ptype
);
3945 static void hns3_rx_skb(struct hns3_enet_ring
*ring
, struct sk_buff
*skb
)
3947 if (skb_has_frag_list(skb
))
3948 napi_gro_flush(&ring
->tqp_vector
->napi
, false);
3950 napi_gro_receive(&ring
->tqp_vector
->napi
, skb
);
3953 static bool hns3_parse_vlan_tag(struct hns3_enet_ring
*ring
,
3954 struct hns3_desc
*desc
, u32 l234info
,
3957 struct hnae3_handle
*handle
= ring
->tqp
->handle
;
3958 struct pci_dev
*pdev
= ring
->tqp
->handle
->pdev
;
3959 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
3961 if (unlikely(ae_dev
->dev_version
< HNAE3_DEVICE_VERSION_V2
)) {
3962 *vlan_tag
= le16_to_cpu(desc
->rx
.ot_vlan_tag
);
3963 if (!(*vlan_tag
& VLAN_VID_MASK
))
3964 *vlan_tag
= le16_to_cpu(desc
->rx
.vlan_tag
);
3966 return (*vlan_tag
!= 0);
3969 #define HNS3_STRP_OUTER_VLAN 0x1
3970 #define HNS3_STRP_INNER_VLAN 0x2
3971 #define HNS3_STRP_BOTH 0x3
3973 /* Hardware always insert VLAN tag into RX descriptor when
3974 * remove the tag from packet, driver needs to determine
3975 * reporting which tag to stack.
3977 switch (hnae3_get_field(l234info
, HNS3_RXD_STRP_TAGP_M
,
3978 HNS3_RXD_STRP_TAGP_S
)) {
3979 case HNS3_STRP_OUTER_VLAN
:
3980 if (handle
->port_base_vlan_state
!=
3981 HNAE3_PORT_BASE_VLAN_DISABLE
)
3984 *vlan_tag
= le16_to_cpu(desc
->rx
.ot_vlan_tag
);
3986 case HNS3_STRP_INNER_VLAN
:
3987 if (handle
->port_base_vlan_state
!=
3988 HNAE3_PORT_BASE_VLAN_DISABLE
)
3991 *vlan_tag
= le16_to_cpu(desc
->rx
.vlan_tag
);
3993 case HNS3_STRP_BOTH
:
3994 if (handle
->port_base_vlan_state
==
3995 HNAE3_PORT_BASE_VLAN_DISABLE
)
3996 *vlan_tag
= le16_to_cpu(desc
->rx
.ot_vlan_tag
);
3998 *vlan_tag
= le16_to_cpu(desc
->rx
.vlan_tag
);
static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
{
	ring->desc[ring->next_to_clean].rx.bd_base_info &=
		cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
	ring->desc_cb[ring->next_to_clean].refill = 0;
	ring->next_to_clean += 1;

	if (unlikely(ring->next_to_clean == ring->desc_num))
		ring->next_to_clean = 0;
}
4017 static int hns3_alloc_skb(struct hns3_enet_ring
*ring
, unsigned int length
,
4020 struct hns3_desc_cb
*desc_cb
= &ring
->desc_cb
[ring
->next_to_clean
];
4021 struct net_device
*netdev
= ring_to_netdev(ring
);
4022 struct sk_buff
*skb
;
4024 ring
->skb
= napi_alloc_skb(&ring
->tqp_vector
->napi
, HNS3_RX_HEAD_SIZE
);
4026 if (unlikely(!skb
)) {
4027 hns3_rl_err(netdev
, "alloc rx skb fail\n");
4028 hns3_ring_stats_update(ring
, sw_err_cnt
);
4033 trace_hns3_rx_desc(ring
);
4034 prefetchw(skb
->data
);
4036 ring
->pending_buf
= 1;
4038 ring
->tail_skb
= NULL
;
4039 if (length
<= HNS3_RX_HEAD_SIZE
) {
4040 memcpy(__skb_put(skb
, length
), va
, ALIGN(length
, sizeof(long)));
4042 /* We can reuse buffer as-is, just make sure it is reusable */
4043 if (dev_page_is_reusable(desc_cb
->priv
))
4044 desc_cb
->reuse_flag
= 1;
4045 else if (desc_cb
->type
& DESC_TYPE_PP_FRAG
)
4046 page_pool_put_full_page(ring
->page_pool
, desc_cb
->priv
,
4048 else /* This page cannot be reused so discard it */
4049 __page_frag_cache_drain(desc_cb
->priv
,
4050 desc_cb
->pagecnt_bias
);
4052 hns3_rx_ring_move_fw(ring
);
4056 if (ring
->page_pool
)
4057 skb_mark_for_recycle(skb
);
4059 hns3_ring_stats_update(ring
, seg_pkt_cnt
);
4061 ring
->pull_len
= eth_get_headlen(netdev
, va
, HNS3_RX_HEAD_SIZE
);
4062 __skb_put(skb
, ring
->pull_len
);
4063 hns3_nic_reuse_page(skb
, ring
->frag_num
++, ring
, ring
->pull_len
,
4065 hns3_rx_ring_move_fw(ring
);
4070 static int hns3_add_frag(struct hns3_enet_ring
*ring
)
4072 struct sk_buff
*skb
= ring
->skb
;
4073 struct sk_buff
*head_skb
= skb
;
4074 struct sk_buff
*new_skb
;
4075 struct hns3_desc_cb
*desc_cb
;
4076 struct hns3_desc
*desc
;
4080 desc
= &ring
->desc
[ring
->next_to_clean
];
4081 desc_cb
= &ring
->desc_cb
[ring
->next_to_clean
];
4082 bd_base_info
= le32_to_cpu(desc
->rx
.bd_base_info
);
4083 /* make sure HW write desc complete */
4085 if (!(bd_base_info
& BIT(HNS3_RXD_VLD_B
)))
4088 if (unlikely(ring
->frag_num
>= MAX_SKB_FRAGS
)) {
4089 new_skb
= napi_alloc_skb(&ring
->tqp_vector
->napi
, 0);
4090 if (unlikely(!new_skb
)) {
4091 hns3_rl_err(ring_to_netdev(ring
),
4092 "alloc rx fraglist skb fail\n");
4096 if (ring
->page_pool
)
4097 skb_mark_for_recycle(new_skb
);
4101 if (ring
->tail_skb
) {
4102 ring
->tail_skb
->next
= new_skb
;
4103 ring
->tail_skb
= new_skb
;
4105 skb_shinfo(skb
)->frag_list
= new_skb
;
4106 ring
->tail_skb
= new_skb
;
4110 if (ring
->tail_skb
) {
4111 head_skb
->truesize
+= hns3_buf_size(ring
);
4112 head_skb
->data_len
+= le16_to_cpu(desc
->rx
.size
);
4113 head_skb
->len
+= le16_to_cpu(desc
->rx
.size
);
4114 skb
= ring
->tail_skb
;
4117 dma_sync_single_for_cpu(ring_to_dev(ring
),
4118 desc_cb
->dma
+ desc_cb
->page_offset
,
4119 hns3_buf_size(ring
),
4122 hns3_nic_reuse_page(skb
, ring
->frag_num
++, ring
, 0, desc_cb
);
4123 trace_hns3_rx_desc(ring
);
4124 hns3_rx_ring_move_fw(ring
);
4125 ring
->pending_buf
++;
4126 } while (!(bd_base_info
& BIT(HNS3_RXD_FE_B
)));
4131 static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
4132				      struct sk_buff *skb, u32 l234info,
4133				      u32 bd_base_info, u32 ol_info, u16 csum)
4135	struct net_device *netdev = ring_to_netdev(ring);
4136	struct hns3_nic_priv *priv = netdev_priv(netdev);
4139	skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
4140						    HNS3_RXD_GRO_SIZE_M,
4141						    HNS3_RXD_GRO_SIZE_S);
4142	/* if there is no HW GRO, do not set gro params */
4143	if (!skb_shinfo(skb)->gso_size) {
4144		hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info,
4149	NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info,
4150						  HNS3_RXD_GRO_COUNT_M,
4151						  HNS3_RXD_GRO_COUNT_S);
4153	if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
4154		u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
4157		l3_type = hns3_rx_ptype_tbl[ptype].l3_type;
4159		l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
4163	if (l3_type == HNS3_L3_TYPE_IPV4)
4164		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
4165	else if (l3_type == HNS3_L3_TYPE_IPV6)
4166		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
4170	return hns3_gro_complete(skb, l234info);
4173 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
4174				     struct sk_buff *skb, u32 rss_hash,
4175				     u32 l234info, u32 ol_info)
4177	enum pkt_hash_types rss_type = PKT_HASH_TYPE_NONE;
4178	struct net_device *netdev = ring_to_netdev(ring);
4179	struct hns3_nic_priv *priv = netdev_priv(netdev);
4181	if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
4182		u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
4185		rss_type = hns3_rx_ptype_tbl[ptype].hash_type;
4187		int l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
4189		int l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
4192		if (l3_type == HNS3_L3_TYPE_IPV4 ||
4193		    l3_type == HNS3_L3_TYPE_IPV6) {
4194			if (l4_type == HNS3_L4_TYPE_UDP ||
4195			    l4_type == HNS3_L4_TYPE_TCP ||
4196			    l4_type == HNS3_L4_TYPE_SCTP)
4197				rss_type = PKT_HASH_TYPE_L4;
4198			else if (l4_type == HNS3_L4_TYPE_IGMP ||
4199				 l4_type == HNS3_L4_TYPE_ICMP)
4200				rss_type = PKT_HASH_TYPE_L3;
4204	skb_set_hash(skb, rss_hash, rss_type);
4207 static void hns3_handle_rx_ts_info(struct net_device *netdev,
4208				   struct hns3_desc *desc, struct sk_buff *skb,
4211	if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) {
4212		struct hnae3_handle *h = hns3_get_handle(netdev);
4213		u32 nsec = le32_to_cpu(desc->ts_nsec);
4214		u32 sec = le32_to_cpu(desc->ts_sec);
4216		if (h->ae_algo->ops->get_rx_hwts)
4217			h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec);
4221 static void hns3_handle_rx_vlan_tag(struct hns3_enet_ring *ring,
4222				    struct hns3_desc *desc, struct sk_buff *skb,
4225	struct net_device *netdev = ring_to_netdev(ring);
4227	/* Based on hw strategy, the tag offloaded will be stored at
4228	 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
4229	 * in one layer tag case.
4231	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
4234		if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
4235			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
4240 static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
4242	struct net_device *netdev = ring_to_netdev(ring);
4243	enum hns3_pkt_l2t_type l2_frame_type;
4244	u32 bd_base_info, l234info, ol_info;
4245	struct hns3_desc *desc;
4250	/* bdinfo handled below is only valid on the last BD of the
4251	 * current packet, and ring->next_to_clean indicates the first
4252	 * descriptor of next packet, so need - 1 below.
4254	pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
4255		  (ring->desc_num - 1);
4256	desc = &ring->desc[pre_ntc];
4257	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
4258	l234info = le32_to_cpu(desc->rx.l234_info);
4259	ol_info = le32_to_cpu(desc->rx.ol_info);
4260	csum = le16_to_cpu(desc->csum);
4262	hns3_handle_rx_ts_info(netdev, desc, skb, bd_base_info);
4264	hns3_handle_rx_vlan_tag(ring, desc, skb, l234info);
4266	if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
4267				  BIT(HNS3_RXD_L2E_B))))) {
4268		u64_stats_update_begin(&ring->syncp);
4269		if (l234info & BIT(HNS3_RXD_L2E_B))
4270			ring->stats.l2_err++;
4272			ring->stats.err_pkt_len++;
4273		u64_stats_update_end(&ring->syncp);
4280	/* Do update ip stack process */
4281	skb->protocol = eth_type_trans(skb, netdev);
4283	/* This is needed in order to enable forwarding support */
4284	ret = hns3_set_gro_and_checksum(ring, skb, l234info,
4285					bd_base_info, ol_info, csum);
4286	if (unlikely(ret)) {
4287		hns3_ring_stats_update(ring, rx_err_cnt);
4291	l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
4294	u64_stats_update_begin(&ring->syncp);
4295	ring->stats.rx_pkts++;
4296	ring->stats.rx_bytes += len;
4298	if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
4299		ring->stats.rx_multicast++;
4301	u64_stats_update_end(&ring->syncp);
4303	ring->tqp_vector->rx_group.total_bytes += len;
4305	hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash),
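/* Illustration (added commentary, not part of the original source):
 * hns3_handle_bdinfo() reads the per-packet fields from the *last* BD of the
 * packet that was just assembled.  Because ring->next_to_clean has already
 * advanced to the first BD of the next packet, the index of that last BD is
 * next_to_clean - 1, wrapping to desc_num - 1 when next_to_clean is 0.  For
 * example, with desc_num = 1024 and next_to_clean = 0, pre_ntc is 1023.
 */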
4310 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
4312	struct sk_buff *skb = ring->skb;
4313	struct hns3_desc_cb *desc_cb;
4314	struct hns3_desc *desc;
4315	unsigned int length;
4319	desc = &ring->desc[ring->next_to_clean];
4320	desc_cb = &ring->desc_cb[ring->next_to_clean];
4325	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
4326	/* Check valid BD */
4327	if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
4331	length = le16_to_cpu(desc->rx.size);
4333	ring->va = desc_cb->buf + desc_cb->page_offset;
4335	dma_sync_single_for_cpu(ring_to_dev(ring),
4336				desc_cb->dma + desc_cb->page_offset,
4337				hns3_buf_size(ring),
4340	/* Prefetch first cache line of first page.
4341	 * Idea is to cache few bytes of the header of the packet.
4342	 * Our L1 Cache line size is 64B so need to prefetch twice to make
4343	 * it 128B. But in actual we can have greater size of caches with
4344	 * 128B Level 1 cache lines. In such a case, single fetch would
4345	 * suffice to cache in the relevant part of the header.
4347	net_prefetch(ring->va);
4349	ret = hns3_alloc_skb(ring, length, ring->va);
4352	if (ret < 0) /* alloc buffer fail */
4354	if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */
4355		ret = hns3_add_frag(ring);
4360		ret = hns3_add_frag(ring);
4365	/* As the head data may be changed when GRO enable, copy
4366	 * the head data in after other data rx completed
4368	if (skb->len > HNS3_RX_HEAD_SIZE)
4369		memcpy(skb->data, ring->va,
4370		       ALIGN(ring->pull_len, sizeof(long)));
4372	ret = hns3_handle_bdinfo(ring, skb);
4373	if (unlikely(ret)) {
4374		dev_kfree_skb_any(skb);
4378	skb_record_rx_queue(skb, ring->tqp->tqp_index);
4382 int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
4383		       void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
4385 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
4386	int unused_count = hns3_desc_unused(ring);
4387	bool failure = false;
4391	unused_count -= ring->pending_buf;
4393	while (recv_pkts < budget) {
4394		/* Reuse or realloc buffers */
4395		if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
4396			failure = failure ||
4397				hns3_nic_alloc_rx_buffers(ring, unused_count);
4402		err = hns3_handle_rx_bd(ring);
4403		/* Do not get FE for the packet or failed to alloc skb */
4404		if (unlikely(!ring->skb || err == -ENXIO)) {
4406		} else if (likely(!err)) {
4407			rx_fn(ring, ring->skb);
4411		unused_count += ring->pending_buf;
4413		ring->pending_buf = 0;
4417	/* sync head pointer before exiting, since hardware will calculate
4418	 * FBD number with head pointer
4420	if (unused_count > 0)
4421		failure = failure ||
4422			hns3_nic_alloc_rx_buffers(ring, unused_count);
4424	return failure ? budget : recv_pkts;
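/* Note (added commentary, not part of the original source): on a buffer
 * re-allocation failure this function reports the full budget as consumed
 * ("return failure ? budget : recv_pkts").  The NAPI poll handler treats a
 * return value that reaches the budget as "work remaining", so polling
 * continues and the driver gets another chance to refill the RX ring instead
 * of stalling with empty descriptors.
 */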
4427 static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
4429	struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
4430	struct dim_sample sample = {};
4432	if (!rx_group->coal.adapt_enable)
4435	dim_update_sample(tqp_vector->event_cnt, rx_group->total_packets,
4436			  rx_group->total_bytes, &sample);
4437	net_dim(&rx_group->dim, sample);
4440 static void hns3_update_tx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
4442	struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
4443	struct dim_sample sample = {};
4445	if (!tx_group->coal.adapt_enable)
4448	dim_update_sample(tqp_vector->event_cnt, tx_group->total_packets,
4449			  tx_group->total_bytes, &sample);
4450	net_dim(&tx_group->dim, sample);
4453 static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
4455	struct hns3_nic_priv *priv = netdev_priv(napi->dev);
4456	struct hns3_enet_ring *ring;
4457	int rx_pkt_total = 0;
4459	struct hns3_enet_tqp_vector *tqp_vector =
4460		container_of(napi, struct hns3_enet_tqp_vector, napi);
4461	bool clean_complete = true;
4462	int rx_budget = budget;
4464	if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
4465		napi_complete(napi);
4469	/* Since the actual Tx work is minimal, we can give the Tx a larger
4470	 * budget and be more aggressive about cleaning up the Tx descriptors.
4472	hns3_for_each_ring(ring, tqp_vector->tx_group)
4473		hns3_clean_tx_ring(ring, budget);
4475	/* make sure rx ring budget not smaller than 1 */
4476	if (tqp_vector->num_tqps > 1)
4477		rx_budget = max(budget / tqp_vector->num_tqps, 1);
4479	hns3_for_each_ring(ring, tqp_vector->rx_group) {
4480		int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
4482		if (rx_cleaned >= rx_budget)
4483			clean_complete = false;
4485		rx_pkt_total += rx_cleaned;
4488	tqp_vector->rx_group.total_packets += rx_pkt_total;
4490	if (!clean_complete)
4493	if (napi_complete(napi) &&
4494	    likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
4495		hns3_update_rx_int_coalesce(tqp_vector);
4496		hns3_update_tx_int_coalesce(tqp_vector);
4498		hns3_mask_vector_irq(tqp_vector, 1);
4501	return rx_pkt_total;
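/* Illustration (added commentary, not part of the original source): the NAPI
 * budget is split evenly across the RX rings served by this vector, with a
 * floor of one descriptor per ring.  With the default NAPI budget of 64 and a
 * vector serving four queue pairs, each RX ring is polled with
 * rx_budget = max(64 / 4, 1) = 16.
 */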
4504 static int hns3_create_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
4505				  struct hnae3_ring_chain_node **head,
4508	u32 bit_value = is_tx ? HNAE3_RING_TYPE_TX : HNAE3_RING_TYPE_RX;
4509	u32 field_value = is_tx ? HNAE3_RING_GL_TX : HNAE3_RING_GL_RX;
4510	struct hnae3_ring_chain_node *cur_chain = *head;
4511	struct pci_dev *pdev = tqp_vector->handle->pdev;
4512	struct hnae3_ring_chain_node *chain;
4513	struct hns3_enet_ring *ring;
4515	ring = is_tx ? tqp_vector->tx_group.ring : tqp_vector->rx_group.ring;
4518		while (cur_chain->next)
4519			cur_chain = cur_chain->next;
4523		chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
4527			cur_chain->next = chain;
4530		chain->tqp_index = ring->tqp->tqp_index;
4531		hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
4533		hnae3_set_field(chain->int_gl_idx,
4534				HNAE3_RING_GL_IDX_M,
4535				HNAE3_RING_GL_IDX_S, field_value);
4545 static struct hnae3_ring_chain_node *
4546 hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector)
4548	struct pci_dev *pdev = tqp_vector->handle->pdev;
4549	struct hnae3_ring_chain_node *cur_chain = NULL;
4550	struct hnae3_ring_chain_node *chain;
4552	if (hns3_create_ring_chain(tqp_vector, &cur_chain, true))
4553		goto err_free_chain;
4555	if (hns3_create_ring_chain(tqp_vector, &cur_chain, false))
4556		goto err_free_chain;
4562		chain = cur_chain->next;
4563		devm_kfree(&pdev->dev, cur_chain);
4570 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
4571					struct hnae3_ring_chain_node *head)
4573	struct pci_dev *pdev = tqp_vector->handle->pdev;
4574	struct hnae3_ring_chain_node *chain_tmp, *chain;
4579		chain_tmp = chain->next;
4580		devm_kfree(&pdev->dev, chain);
4585 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
4586				   struct hns3_enet_ring *ring)
4588	ring->next = group->ring;
4594 static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
4596	struct pci_dev *pdev = priv->ae_handle->pdev;
4597	struct hns3_enet_tqp_vector *tqp_vector;
4598	int num_vectors = priv->vector_num;
4602	numa_node = dev_to_node(&pdev->dev);
4604	for (vector_i = 0; vector_i < num_vectors; vector_i++) {
4605		tqp_vector = &priv->tqp_vector[vector_i];
4606		cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
4607				&tqp_vector->affinity_mask);
4611 static void hns3_rx_dim_work(struct work_struct *work)
4613	struct dim *dim = container_of(work, struct dim, work);
4614	struct hns3_enet_ring_group *group = container_of(dim,
4615		struct hns3_enet_ring_group, dim);
4616	struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector;
4617	struct dim_cq_moder cur_moder =
4618		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
4620	hns3_set_vector_coalesce_rx_gl(group->ring->tqp_vector, cur_moder.usec);
4621	tqp_vector->rx_group.coal.int_gl = cur_moder.usec;
4623	if (cur_moder.pkts < tqp_vector->rx_group.coal.int_ql_max) {
4624		hns3_set_vector_coalesce_rx_ql(tqp_vector, cur_moder.pkts);
4625		tqp_vector->rx_group.coal.int_ql = cur_moder.pkts;
4628	dim->state = DIM_START_MEASURE;
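/* Flow note (added commentary, not part of the original source): this work
 * item is the back half of the net_dim() calls made from the coalesce-update
 * helpers above.  When DIM decides to move to a different moderation profile
 * it schedules dim.work; the handler looks up the new profile, programs the
 * GL (usecs) and, where supported, QL (packets) settings for the vector, and
 * re-arms DIM by setting dim->state back to DIM_START_MEASURE.
 */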
4631 static void hns3_tx_dim_work(struct work_struct *work)
4633	struct dim *dim = container_of(work, struct dim, work);
4634	struct hns3_enet_ring_group *group = container_of(dim,
4635		struct hns3_enet_ring_group, dim);
4636	struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector;
4637	struct dim_cq_moder cur_moder =
4638		net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
4640	hns3_set_vector_coalesce_tx_gl(tqp_vector, cur_moder.usec);
4641	tqp_vector->tx_group.coal.int_gl = cur_moder.usec;
4643	if (cur_moder.pkts < tqp_vector->tx_group.coal.int_ql_max) {
4644		hns3_set_vector_coalesce_tx_ql(tqp_vector, cur_moder.pkts);
4645		tqp_vector->tx_group.coal.int_ql = cur_moder.pkts;
4648	dim->state = DIM_START_MEASURE;
4651 static void hns3_nic_init_dim(struct hns3_enet_tqp_vector *tqp_vector)
4653	INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work);
4654	INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work);
4657 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
4659	struct hnae3_handle *h = priv->ae_handle;
4660	struct hns3_enet_tqp_vector *tqp_vector;
4664	hns3_nic_set_cpumask(priv);
4666	for (i = 0; i < priv->vector_num; i++) {
4667		tqp_vector = &priv->tqp_vector[i];
4668		hns3_vector_coalesce_init_hw(tqp_vector, priv);
4669		tqp_vector->num_tqps = 0;
4670		hns3_nic_init_dim(tqp_vector);
4673	for (i = 0; i < h->kinfo.num_tqps; i++) {
4674		u16 vector_i = i % priv->vector_num;
4675		u16 tqp_num = h->kinfo.num_tqps;
4677		tqp_vector = &priv->tqp_vector[vector_i];
4679		hns3_add_ring_to_group(&tqp_vector->tx_group,
4682		hns3_add_ring_to_group(&tqp_vector->rx_group,
4683				       &priv->ring[i + tqp_num]);
4685		priv->ring[i].tqp_vector = tqp_vector;
4686		priv->ring[i + tqp_num].tqp_vector = tqp_vector;
4687		tqp_vector->num_tqps++;
4690	for (i = 0; i < priv->vector_num; i++) {
4691		struct hnae3_ring_chain_node *vector_ring_chain;
4693		tqp_vector = &priv->tqp_vector[i];
4695		tqp_vector->rx_group.total_bytes = 0;
4696		tqp_vector->rx_group.total_packets = 0;
4697		tqp_vector->tx_group.total_bytes = 0;
4698		tqp_vector->tx_group.total_packets = 0;
4699		tqp_vector->handle = h;
4701		vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
4702		if (!vector_ring_chain) {
4707		ret = h->ae_algo->ops->map_ring_to_vector(h,
4708			tqp_vector->vector_irq, vector_ring_chain);
4710		hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);
4715		netif_napi_add(priv->netdev, &tqp_vector->napi,
4716			       hns3_nic_common_poll);
4723		netif_napi_del(&priv->tqp_vector[i].napi);
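/* Layout note (added commentary, not part of the original source): priv->ring
 * is a single array of 2 * num_tqps entries; TX ring i lives at index i and
 * its RX counterpart at index i + num_tqps, which is why the loop above
 * touches both priv->ring[i] and priv->ring[i + tqp_num].  Queue pairs are
 * spread over the interrupt vectors round-robin via
 * "vector_i = i % priv->vector_num", so e.g. 8 TQPs on 4 vectors gives each
 * vector two TX/RX ring pairs.
 */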
4728 static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
4730	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
4731	struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
4732	struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
4734	/* initialize the configuration for interrupt coalescing.
4735	 * 1. GL (Interrupt Gap Limiter)
4736	 * 2. RL (Interrupt Rate Limiter)
4737	 * 3. QL (Interrupt Quantity Limiter)
4739	 * Default: enable interrupt coalescing self-adaptive and GL
4741	tx_coal->adapt_enable = 1;
4742	rx_coal->adapt_enable = 1;
4744	tx_coal->int_gl = HNS3_INT_GL_50K;
4745	rx_coal->int_gl = HNS3_INT_GL_50K;
4747	rx_coal->flow_level = HNS3_FLOW_LOW;
4748	tx_coal->flow_level = HNS3_FLOW_LOW;
4750	if (ae_dev->dev_specs.int_ql_max) {
4751		tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
4752		rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
4756 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
4758	struct hnae3_handle *h = priv->ae_handle;
4759	struct hns3_enet_tqp_vector *tqp_vector;
4760	struct hnae3_vector_info *vector;
4761	struct pci_dev *pdev = h->pdev;
4762	u16 tqp_num = h->kinfo.num_tqps;
4767	/* RSS size, cpu online and vector_num should be the same */
4768	/* Should consider 2p/4p later */
4769	vector_num = min_t(u16, num_online_cpus(), tqp_num);
4771	vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
4776	/* save the actual available vector number */
4777	vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
4779	priv->vector_num = vector_num;
4780	priv->tqp_vector = (struct hns3_enet_tqp_vector *)
4781		devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
4783	if (!priv->tqp_vector) {
4788	for (i = 0; i < priv->vector_num; i++) {
4789		tqp_vector = &priv->tqp_vector[i];
4790		tqp_vector->idx = i;
4791		tqp_vector->mask_addr = vector[i].io_addr;
4792		tqp_vector->vector_irq = vector[i].vector;
4793		hns3_vector_coalesce_init(tqp_vector, priv);
4797	devm_kfree(&pdev->dev, vector);
4801 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
4807 static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
4809	struct hnae3_ring_chain_node *vector_ring_chain;
4810	struct hnae3_handle *h = priv->ae_handle;
4811	struct hns3_enet_tqp_vector *tqp_vector;
4814	for (i = 0; i < priv->vector_num; i++) {
4815		tqp_vector = &priv->tqp_vector[i];
4817		if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
4820		/* Since the mapping can be overwritten, when fail to get the
4821		 * chain between vector and ring, we should go on to deal with
4822		 * the remaining options.
4824		vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
4825		if (!vector_ring_chain)
4826			dev_warn(priv->dev, "failed to get ring chain\n");
4828		h->ae_algo->ops->unmap_ring_from_vector(h,
4829			tqp_vector->vector_irq, vector_ring_chain);
4831		hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);
4833		hns3_clear_ring_group(&tqp_vector->rx_group);
4834		hns3_clear_ring_group(&tqp_vector->tx_group);
4835		netif_napi_del(&priv->tqp_vector[i].napi);
4839 static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
4841	struct hnae3_handle *h = priv->ae_handle;
4842	struct pci_dev *pdev = h->pdev;
4845	for (i = 0; i < priv->vector_num; i++) {
4846		struct hns3_enet_tqp_vector *tqp_vector;
4848		tqp_vector = &priv->tqp_vector[i];
4849		ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
4854	devm_kfree(&pdev->dev, priv->tqp_vector);
4857 static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
4858			      unsigned int ring_type)
4860	int queue_num = priv->ae_handle->kinfo.num_tqps;
4861	struct hns3_enet_ring *ring;
4864	if (ring_type == HNAE3_RING_TYPE_TX) {
4865		ring = &priv->ring[q->tqp_index];
4866		desc_num = priv->ae_handle->kinfo.num_tx_desc;
4867		ring->queue_index = q->tqp_index;
4868		ring->tx_copybreak = priv->tx_copybreak;
4869		ring->last_to_use = 0;
4871		ring = &priv->ring[q->tqp_index + queue_num];
4872		desc_num = priv->ae_handle->kinfo.num_rx_desc;
4873		ring->queue_index = q->tqp_index;
4874		ring->rx_copybreak = priv->rx_copybreak;
4877	hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
4881	ring->desc_cb = NULL;
4882	ring->dev = priv->dev;
4883	ring->desc_dma_addr = 0;
4884	ring->buf_size = q->buf_size;
4885	ring->desc_num = desc_num;
4886	ring->next_to_use = 0;
4887	ring->next_to_clean = 0;
4890 static void hns3_queue_to_ring(struct hnae3_queue *tqp,
4891			       struct hns3_nic_priv *priv)
4893	hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
4894	hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
4897 static int hns3_get_ring_config(struct hns3_nic_priv *priv)
4899	struct hnae3_handle *h = priv->ae_handle;
4900	struct pci_dev *pdev = h->pdev;
4903	priv->ring = devm_kzalloc(&pdev->dev,
4904				  array3_size(h->kinfo.num_tqps,
4905					      sizeof(*priv->ring), 2),
4910	for (i = 0; i < h->kinfo.num_tqps; i++)
4911		hns3_queue_to_ring(h->kinfo.tqp[i], priv);
4916 static void hns3_put_ring_config(struct hns3_nic_priv *priv)
4921	devm_kfree(priv->dev, priv->ring);
4925 static void hns3_alloc_page_pool(struct hns3_enet_ring *ring)
4927	struct page_pool_params pp_params = {
4928		.flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG |
4929			 PP_FLAG_DMA_SYNC_DEV,
4930		.order = hns3_page_order(ring),
4931		.pool_size = ring->desc_num * hns3_buf_size(ring) /
4932			     (PAGE_SIZE << hns3_page_order(ring)),
4933		.nid = dev_to_node(ring_to_dev(ring)),
4934		.dev = ring_to_dev(ring),
4935		.dma_dir = DMA_FROM_DEVICE,
4937		.max_len = PAGE_SIZE << hns3_page_order(ring),
4940	ring->page_pool = page_pool_create(&pp_params);
4941	if (IS_ERR(ring->page_pool)) {
4942		dev_warn(ring_to_dev(ring), "page pool creation failed: %ld\n",
4943			 PTR_ERR(ring->page_pool));
4944		ring->page_pool = NULL;
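/* Worked example (added commentary, not part of the original source; the
 * numbers are illustrative): pool_size is the number of pages needed to back
 * every descriptor's buffer.  With desc_num = 1024 descriptors, a 2048-byte
 * RX buffer, 4 KiB pages and page order 0, pool_size = 1024 * 2048 / 4096 =
 * 512 pages.  If pool creation fails, ring->page_pool stays NULL and the
 * driver falls back to the non-page-pool buffer allocation path.
 */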
4948 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
4952	if (ring->desc_num <= 0 || ring->buf_size <= 0)
4955	ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
4956				     sizeof(ring->desc_cb[0]), GFP_KERNEL);
4957	if (!ring->desc_cb) {
4962	ret = hns3_alloc_desc(ring);
4964		goto out_with_desc_cb;
4966	if (!HNAE3_IS_TX_RING(ring)) {
4967		if (page_pool_enabled)
4968			hns3_alloc_page_pool(ring);
4970		ret = hns3_alloc_ring_buffers(ring);
4974		hns3_init_tx_spare_buffer(ring);
4980	hns3_free_desc(ring);
4982	devm_kfree(ring_to_dev(ring), ring->desc_cb);
4983	ring->desc_cb = NULL;
4988 void hns3_fini_ring(struct hns3_enet_ring *ring)
4990	hns3_free_desc(ring);
4991	devm_kfree(ring_to_dev(ring), ring->desc_cb);
4992	ring->desc_cb = NULL;
4993	ring->next_to_clean = 0;
4994	ring->next_to_use = 0;
4995	ring->last_to_use = 0;
4996	ring->pending_buf = 0;
4997	if (!HNAE3_IS_TX_RING(ring) && ring->skb) {
4998		dev_kfree_skb_any(ring->skb);
5000	} else if (HNAE3_IS_TX_RING(ring) && ring->tx_spare) {
5001		struct hns3_tx_spare *tx_spare = ring->tx_spare;
5003		dma_unmap_page(ring_to_dev(ring), tx_spare->dma, tx_spare->len,
5005		free_pages((unsigned long)tx_spare->buf,
5006			   get_order(tx_spare->len));
5007		devm_kfree(ring_to_dev(ring), tx_spare);
5008		ring->tx_spare = NULL;
5011	if (!HNAE3_IS_TX_RING(ring) && ring->page_pool) {
5012		page_pool_destroy(ring->page_pool);
5013		ring->page_pool = NULL;
5017 static int hns3_buf_size2type(u32 buf_size)
5023		bd_size_type = HNS3_BD_SIZE_512_TYPE;
5026		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
5029		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
5032		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
5035		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
5038	return bd_size_type;
5041 static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
5043	dma_addr_t dma = ring->desc_dma_addr;
5044	struct hnae3_queue *q = ring->tqp;
5046	if (!HNAE3_IS_TX_RING(ring)) {
5047		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
5048		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
5049			       (u32)((dma >> 31) >> 1));
5051		hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
5052			       hns3_buf_size2type(ring->buf_size));
5053		hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
5054			       ring->desc_num / 8 - 1);
5056		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
5058		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
5059			       (u32)((dma >> 31) >> 1));
5061		hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
5062			       ring->desc_num / 8 - 1);
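/* Note (added commentary, not part of the original source):
 * "(u32)((dma >> 31) >> 1)" writes the upper 32 bits of the 64-bit descriptor
 * base address into the high-address register.  Shifting in two steps rather
 * than a single ">> 32" keeps the expression well defined even on builds
 * where dma_addr_t happens to be only 32 bits wide.
 */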
5066 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
5068	struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
5069	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
5072	for (i = 0; i < tc_info->num_tc; i++) {
5075		for (j = 0; j < tc_info->tqp_count[i]; j++) {
5076			struct hnae3_queue *q;
5078			q = priv->ring[tc_info->tqp_offset[i] + j].tqp;
5079			hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i);
5084 int hns3_init_all_ring(struct hns3_nic_priv *priv)
5086	struct hnae3_handle *h = priv->ae_handle;
5087	int ring_num = h->kinfo.num_tqps * 2;
5091	for (i = 0; i < ring_num; i++) {
5092		ret = hns3_alloc_ring_memory(&priv->ring[i]);
5095				"Alloc ring memory fail! ret=%d\n", ret);
5096			goto out_when_alloc_ring_memory;
5099		u64_stats_init(&priv->ring[i].syncp);
5104 out_when_alloc_ring_memory:
5105	for (j = i - 1; j >= 0; j--)
5106		hns3_fini_ring(&priv->ring[j]);
5111 static void hns3_uninit_all_ring(struct hns3_nic_priv *priv)
5113	struct hnae3_handle *h = priv->ae_handle;
5116	for (i = 0; i < h->kinfo.num_tqps; i++) {
5117		hns3_fini_ring(&priv->ring[i]);
5118		hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]);
5122 /* Set mac addr if it is configured, or leave it to the AE driver */
5123 static int hns3_init_mac_addr(struct net_device *netdev)
5125	struct hns3_nic_priv *priv = netdev_priv(netdev);
5126	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
5127	struct hnae3_handle *h = priv->ae_handle;
5128	u8 mac_addr_temp[ETH_ALEN];
5131	if (h->ae_algo->ops->get_mac_addr)
5132		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
5134	/* Check if the MAC address is valid, if not get a random one */
5135	if (!is_valid_ether_addr(mac_addr_temp)) {
5136		eth_hw_addr_random(netdev);
5137		hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr);
5138		dev_warn(priv->dev, "using random MAC address %s\n",
5140	} else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
5141		eth_hw_addr_set(netdev, mac_addr_temp);
5142		ether_addr_copy(netdev->perm_addr, mac_addr_temp);
5147	if (h->ae_algo->ops->set_mac_addr)
5148		ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
5153 static int hns3_init_phy(struct net_device *netdev)
5155	struct hnae3_handle *h = hns3_get_handle(netdev);
5158	if (h->ae_algo->ops->mac_connect_phy)
5159		ret = h->ae_algo->ops->mac_connect_phy(h);
5164 static void hns3_uninit_phy(struct net_device *netdev)
5166	struct hnae3_handle *h = hns3_get_handle(netdev);
5168	if (h->ae_algo->ops->mac_disconnect_phy)
5169		h->ae_algo->ops->mac_disconnect_phy(h);
5172 static int hns3_client_start(struct hnae3_handle *handle)
5174	if (!handle->ae_algo->ops->client_start)
5177	return handle->ae_algo->ops->client_start(handle);
5180 static void hns3_client_stop(struct hnae3_handle *handle)
5182	if (!handle->ae_algo->ops->client_stop)
5185	handle->ae_algo->ops->client_stop(handle);
5188 static void hns3_info_show(struct hns3_nic_priv *priv)
5190	struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
5191	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
5193	hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr);
5194	dev_info(priv->dev, "MAC address: %s\n", format_mac_addr);
5195	dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
5196	dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
5197	dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
5198	dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
5199	dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
5200	dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
5201	dev_info(priv->dev, "Total number of enabled TCs: %u\n",
5202		 kinfo->tc_info.num_tc);
5203	dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu);
5206 static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
5207				    enum dim_cq_period_mode mode, bool is_tx)
5209	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
5210	struct hnae3_handle *handle = priv->ae_handle;
5214		priv->tx_cqe_mode = mode;
5216		for (i = 0; i < priv->vector_num; i++)
5217			priv->tqp_vector[i].tx_group.dim.mode = mode;
5219		priv->rx_cqe_mode = mode;
5221		for (i = 0; i < priv->vector_num; i++)
5222			priv->tqp_vector[i].rx_group.dim.mode = mode;
5225	if (hnae3_ae_dev_cq_supported(ae_dev)) {
5229		new_mode = (mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE) ?
5230			   HNS3_CQ_MODE_CQE : HNS3_CQ_MODE_EQE;
5231		reg = is_tx ? HNS3_GL1_CQ_MODE_REG : HNS3_GL0_CQ_MODE_REG;
5233		writel(new_mode, handle->kinfo.io_base + reg);
5237 void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
5238			      enum dim_cq_period_mode tx_mode,
5239			      enum dim_cq_period_mode rx_mode)
5241	hns3_set_cq_period_mode(priv, tx_mode, true);
5242	hns3_set_cq_period_mode(priv, rx_mode, false);
5245 static void hns3_state_init(struct hnae3_handle *handle)
5247	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
5248	struct net_device *netdev = handle->kinfo.netdev;
5249	struct hns3_nic_priv *priv = netdev_priv(netdev);
5251	set_bit(HNS3_NIC_STATE_INITED, &priv->state);
5253	if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
5254		set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
5256	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5257		set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
5259	if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
5260		set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state);
5262	if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev))
5263		set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
5266 static void hns3_state_uninit(struct hnae3_handle *handle)
5268	struct hns3_nic_priv *priv = handle->priv;
5270	clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
5273 static int hns3_client_init(struct hnae3_handle *handle)
5275	struct pci_dev *pdev = handle->pdev;
5276	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
5277	u16 alloc_tqps, max_rss_size;
5278	struct hns3_nic_priv *priv;
5279	struct net_device *netdev;
5282	handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
5284	netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
5288	priv = netdev_priv(netdev);
5289	priv->dev = &pdev->dev;
5290	priv->netdev = netdev;
5291	priv->ae_handle = handle;
5292	priv->tx_timeout_count = 0;
5293	priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
5294	set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
5296	handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
5298	handle->kinfo.netdev = netdev;
5299	handle->priv = (void *)priv;
5301	hns3_init_mac_addr(netdev);
5303	hns3_set_default_feature(netdev);
5305	netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
5306	netdev->priv_flags |= IFF_UNICAST_FLT;
5307	netdev->netdev_ops = &hns3_nic_netdev_ops;
5308	SET_NETDEV_DEV(netdev, &pdev->dev);
5309	hns3_ethtool_set_ops(netdev);
5311	/* Carrier off reporting is important to ethtool even BEFORE open */
5312	netif_carrier_off(netdev);
5314	ret = hns3_get_ring_config(priv);
5317		goto out_get_ring_cfg;
5320	hns3_nic_init_coal_cfg(priv);
5322	ret = hns3_nic_alloc_vector_data(priv);
5325		goto out_alloc_vector_data;
5328	ret = hns3_nic_init_vector_data(priv);
5331		goto out_init_vector_data;
5334	ret = hns3_init_all_ring(priv);
5340	hns3_cq_period_mode_init(priv, DIM_CQ_PERIOD_MODE_START_FROM_EQE,
5341				 DIM_CQ_PERIOD_MODE_START_FROM_EQE);
5343	ret = hns3_init_phy(netdev);
5347	/* the device can work without cpu rmap, only aRFS needs it */
5348	ret = hns3_set_rx_cpu_rmap(netdev);
5350		dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);
5352	ret = hns3_nic_init_irq(priv);
5354		dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
5355		hns3_free_rx_cpu_rmap(netdev);
5356		goto out_init_irq_fail;
5359	ret = hns3_client_start(handle);
5361		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
5362		goto out_client_start;
5365	hns3_dcbnl_setup(handle);
5367	ret = hns3_dbg_init(handle);
5369		dev_err(priv->dev, "failed to init debugfs, ret = %d\n",
5371		goto out_client_start;
5374	netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size);
5376	hns3_state_init(handle);
5378	ret = register_netdev(netdev);
5380		dev_err(priv->dev, "probe register netdev fail!\n");
5381		goto out_reg_netdev_fail;
5384	if (netif_msg_drv(handle))
5385		hns3_info_show(priv);
5389 out_reg_netdev_fail:
5390	hns3_state_uninit(handle);
5391	hns3_dbg_uninit(handle);
5392	hns3_client_stop(handle);
5394	hns3_free_rx_cpu_rmap(netdev);
5395	hns3_nic_uninit_irq(priv);
5397	hns3_uninit_phy(netdev);
5399	hns3_uninit_all_ring(priv);
5401	hns3_nic_uninit_vector_data(priv);
5402 out_init_vector_data:
5403	hns3_nic_dealloc_vector_data(priv);
5404 out_alloc_vector_data:
5407	priv->ae_handle = NULL;
5408	free_netdev(netdev);
5412 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
5414	struct net_device *netdev = handle->kinfo.netdev;
5415	struct hns3_nic_priv *priv = netdev_priv(netdev);
5417	if (netdev->reg_state != NETREG_UNINITIALIZED)
5418		unregister_netdev(netdev);
5420	hns3_client_stop(handle);
5422	hns3_uninit_phy(netdev);
5424	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
5425		netdev_warn(netdev, "already uninitialized\n");
5426		goto out_netdev_free;
5429	hns3_free_rx_cpu_rmap(netdev);
5431	hns3_nic_uninit_irq(priv);
5433	hns3_clear_all_ring(handle, true);
5435	hns3_nic_uninit_vector_data(priv);
5437	hns3_nic_dealloc_vector_data(priv);
5439	hns3_uninit_all_ring(priv);
5441	hns3_put_ring_config(priv);
5444	hns3_dbg_uninit(handle);
5445	free_netdev(netdev);
5448 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
5450	struct net_device *netdev = handle->kinfo.netdev;
5456		netif_tx_wake_all_queues(netdev);
5457		netif_carrier_on(netdev);
5458		if (netif_msg_link(handle))
5459			netdev_info(netdev, "link up\n");
5461		netif_carrier_off(netdev);
5462		netif_tx_stop_all_queues(netdev);
5463		if (netif_msg_link(handle))
5464			netdev_info(netdev, "link down\n");
5468 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
5470	while (ring->next_to_clean != ring->next_to_use) {
5471		ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
5472		hns3_free_buffer_detach(ring, ring->next_to_clean, 0);
5473		ring_ptr_move_fw(ring, next_to_clean);
5476	ring->pending_buf = 0;
5479 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
5481	struct hns3_desc_cb res_cbs;
5484	while (ring->next_to_use != ring->next_to_clean) {
5485		/* When a buffer is not reused, its memory has been
5486		 * freed in hns3_handle_rx_bd or will be freed by
5487		 * stack, so we need to replace the buffer here.
5489		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
5490			ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
5492				hns3_ring_stats_update(ring, sw_err_cnt);
5493				/* if alloc new buffer fail, exit directly
5494				 * and reclear in up flow.
5496				netdev_warn(ring_to_netdev(ring),
5497					    "reserve buffer map failed, ret = %d\n",
5501			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
5503		ring_ptr_move_fw(ring, next_to_use);
5506	/* Free the pending skb in rx ring */
5508		dev_kfree_skb_any(ring->skb);
5510		ring->pending_buf = 0;
5516 static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
5518	while (ring->next_to_use != ring->next_to_clean) {
5519		/* When a buffer is not reused, its memory has been
5520		 * freed in hns3_handle_rx_bd or will be freed by
5521		 * stack, so only need to unmap the buffer here.
5523		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
5524			hns3_unmap_buffer(ring,
5525					  &ring->desc_cb[ring->next_to_use]);
5526			ring->desc_cb[ring->next_to_use].dma = 0;
5529		ring_ptr_move_fw(ring, next_to_use);
5533 static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
5535	struct net_device *ndev = h->kinfo.netdev;
5536	struct hns3_nic_priv *priv = netdev_priv(ndev);
5539	for (i = 0; i < h->kinfo.num_tqps; i++) {
5540		struct hns3_enet_ring *ring;
5542		ring = &priv->ring[i];
5543		hns3_clear_tx_ring(ring);
5545		ring = &priv->ring[i + h->kinfo.num_tqps];
5546		/* Continue to clear other rings even if clearing some
5550			hns3_force_clear_rx_ring(ring);
5552			hns3_clear_rx_ring(ring);
5556 int hns3_nic_reset_all_ring(struct hnae3_handle *h)
5558	struct net_device *ndev = h->kinfo.netdev;
5559	struct hns3_nic_priv *priv = netdev_priv(ndev);
5560	struct hns3_enet_ring *rx_ring;
5564	ret = h->ae_algo->ops->reset_queue(h);
5568	for (i = 0; i < h->kinfo.num_tqps; i++) {
5569		hns3_init_ring_hw(&priv->ring[i]);
5571		/* We need to clear tx ring here because self test will
5572		 * use the ring and will not run down before up
5574		hns3_clear_tx_ring(&priv->ring[i]);
5575		priv->ring[i].next_to_clean = 0;
5576		priv->ring[i].next_to_use = 0;
5577		priv->ring[i].last_to_use = 0;
5579		rx_ring = &priv->ring[i + h->kinfo.num_tqps];
5580		hns3_init_ring_hw(rx_ring);
5581		ret = hns3_clear_rx_ring(rx_ring);
5585		/* We can not know the hardware head and tail when this
5586		 * function is called in reset flow, so we reuse all desc.
5588		for (j = 0; j < rx_ring->desc_num; j++)
5589			hns3_reuse_buffer(rx_ring, j);
5591		rx_ring->next_to_clean = 0;
5592		rx_ring->next_to_use = 0;
5595	hns3_init_tx_ring_tc(priv);
5600 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
5602	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
5603	struct net_device *ndev = kinfo->netdev;
5604	struct hns3_nic_priv *priv = netdev_priv(ndev);
5606	if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
5609	if (!netif_running(ndev))
5612	return hns3_nic_net_stop(ndev);
5615 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
5617	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
5618	struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
5621	if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
5622		netdev_err(kinfo->netdev, "device is not initialized yet\n");
5626	clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
5628	if (netif_running(kinfo->netdev)) {
5629		ret = hns3_nic_net_open(kinfo->netdev);
5631			set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
5632			netdev_err(kinfo->netdev,
5633				   "net up fail, ret=%d!\n", ret);
5641 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
5643	struct net_device *netdev = handle->kinfo.netdev;
5644	struct hns3_nic_priv *priv = netdev_priv(netdev);
5647	/* Carrier off reporting is important to ethtool even BEFORE open */
5648	netif_carrier_off(netdev);
5650	ret = hns3_get_ring_config(priv);
5654	ret = hns3_nic_alloc_vector_data(priv);
5658	ret = hns3_nic_init_vector_data(priv);
5660		goto err_dealloc_vector;
5662	ret = hns3_init_all_ring(priv);
5664		goto err_uninit_vector;
5666	hns3_cq_period_mode_init(priv, priv->tx_cqe_mode, priv->rx_cqe_mode);
5668	/* the device can work without cpu rmap, only aRFS needs it */
5669	ret = hns3_set_rx_cpu_rmap(netdev);
5671		dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);
5673	ret = hns3_nic_init_irq(priv);
5675		dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
5676		hns3_free_rx_cpu_rmap(netdev);
5677		goto err_init_irq_fail;
5680	if (!hns3_is_phys_func(handle->pdev))
5681		hns3_init_mac_addr(netdev);
5683	ret = hns3_client_start(handle);
5685		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
5686		goto err_client_start_fail;
5689	set_bit(HNS3_NIC_STATE_INITED, &priv->state);
5693 err_client_start_fail:
5694	hns3_free_rx_cpu_rmap(netdev);
5695	hns3_nic_uninit_irq(priv);
5697	hns3_uninit_all_ring(priv);
5699	hns3_nic_uninit_vector_data(priv);
5701	hns3_nic_dealloc_vector_data(priv);
5703	hns3_put_ring_config(priv);
5708 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
5710	struct net_device *netdev = handle->kinfo.netdev;
5711	struct hns3_nic_priv *priv = netdev_priv(netdev);
5713	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
5714		netdev_warn(netdev, "already uninitialized\n");
5718	hns3_free_rx_cpu_rmap(netdev);
5719	hns3_nic_uninit_irq(priv);
5720	hns3_clear_all_ring(handle, true);
5721	hns3_reset_tx_queue(priv->ae_handle);
5723	hns3_nic_uninit_vector_data(priv);
5725	hns3_nic_dealloc_vector_data(priv);
5727	hns3_uninit_all_ring(priv);
5729	hns3_put_ring_config(priv);
5734 int hns3_reset_notify(struct hnae3_handle *handle,
5735		      enum hnae3_reset_notify_type type)
5740	case HNAE3_UP_CLIENT:
5741		ret = hns3_reset_notify_up_enet(handle);
5743	case HNAE3_DOWN_CLIENT:
5744		ret = hns3_reset_notify_down_enet(handle);
5746	case HNAE3_INIT_CLIENT:
5747		ret = hns3_reset_notify_init_enet(handle);
5749	case HNAE3_UNINIT_CLIENT:
5750		ret = hns3_reset_notify_uninit_enet(handle);
5759 static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num,
5760				bool rxfh_configured)
5764	ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num,
5767		dev_err(&handle->pdev->dev,
5768			"Change tqp num(%u) fail.\n", new_tqp_num);
5772	ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT);
5776	ret = hns3_reset_notify(handle, HNAE3_UP_CLIENT);
5778		hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT);
5783 int hns3_set_channels(struct net_device *netdev,
5784		      struct ethtool_channels *ch)
5786	struct hnae3_handle *h = hns3_get_handle(netdev);
5787	struct hnae3_knic_private_info *kinfo = &h->kinfo;
5788	bool rxfh_configured = netif_is_rxfh_configured(netdev);
5789	u32 new_tqp_num = ch->combined_count;
5793	if (hns3_nic_resetting(netdev))
5796	if (ch->rx_count || ch->tx_count)
5799	if (kinfo->tc_info.mqprio_active) {
5800		dev_err(&netdev->dev,
5801			"it's not allowed to set channels via ethtool when MQPRIO mode is on\n");
5805	if (new_tqp_num > hns3_get_max_available_channels(h) ||
5807		dev_err(&netdev->dev,
5808			"Change tqps fail, the tqp range is from 1 to %u",
5809			hns3_get_max_available_channels(h));
5813	if (kinfo->rss_size == new_tqp_num)
5816	netif_dbg(h, drv, netdev,
5817		  "set channels: tqp_num=%u, rxfh=%d\n",
5818		  new_tqp_num, rxfh_configured);
5820	ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
5824	ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
5828	org_tqp_num = h->kinfo.num_tqps;
5829	ret = hns3_change_channels(h, new_tqp_num, rxfh_configured);
5834			"Change channels fail, revert to old value\n");
5835		ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured);
5838			"revert to old channel fail\n");
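/* Flow note (added commentary, not part of the original source): an ethtool
 * channel change is carried out as a client reset sequence.  The netdev is
 * first brought down (HNAE3_DOWN_CLIENT) and torn down (HNAE3_UNINIT_CLIENT),
 * then hns3_change_channels() applies the new TQP count and re-runs
 * HNAE3_INIT_CLIENT and HNAE3_UP_CLIENT.  If the change fails, the same
 * helper is invoked again with the original queue count to revert.
 */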
5848 void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
5850	struct hns3_nic_priv *priv = netdev_priv(ndev);
5851	struct hnae3_handle *h = priv->ae_handle;
5857	netif_carrier_off(ndev);
5858	netif_tx_disable(ndev);
5860	for (i = 0; i < priv->vector_num; i++)
5861		hns3_vector_disable(&priv->tqp_vector[i]);
5863	for (i = 0; i < h->kinfo.num_tqps; i++)
5864		hns3_tqp_disable(h->kinfo.tqp[i]);
5866	/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
5867	 * during reset process, because driver may not be able
5868	 * to disable the ring through firmware when downing the netdev.
5870	if (!hns3_nic_resetting(ndev))
5871		hns3_nic_reset_all_ring(priv->ae_handle);
5873	hns3_reset_tx_queue(priv->ae_handle);
5876 void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
5878	struct hns3_nic_priv *priv = netdev_priv(ndev);
5879	struct hnae3_handle *h = priv->ae_handle;
5885	hns3_nic_reset_all_ring(priv->ae_handle);
5887	for (i = 0; i < priv->vector_num; i++)
5888		hns3_vector_enable(&priv->tqp_vector[i]);
5890	for (i = 0; i < h->kinfo.num_tqps; i++)
5891		hns3_tqp_enable(h->kinfo.tqp[i]);
5893	netif_tx_wake_all_queues(ndev);
5895	if (h->ae_algo->ops->get_status(h))
5896		netif_carrier_on(ndev);
5899 static const struct hns3_hw_error_info hns3_hw_err[] = {
5900	{ .type = HNAE3_PPU_POISON_ERROR,
5901	  .msg = "PPU poison" },
5902	{ .type = HNAE3_CMDQ_ECC_ERROR,
5903	  .msg = "IMP CMDQ error" },
5904	{ .type = HNAE3_IMP_RD_POISON_ERROR,
5905	  .msg = "IMP RD poison" },
5906	{ .type = HNAE3_ROCEE_AXI_RESP_ERROR,
5907	  .msg = "ROCEE AXI RESP error" },
5910 static void hns3_process_hw_error(struct hnae3_handle *handle,
5911				  enum hnae3_hw_error_type type)
5915	for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) {
5916		if (hns3_hw_err[i].type == type) {
5917			dev_err(&handle->pdev->dev, "Detected %s!\n",
5918				hns3_hw_err[i].msg);
5924 static const struct hnae3_client_ops client_ops = {
5925	.init_instance = hns3_client_init,
5926	.uninit_instance = hns3_client_uninit,
5927	.link_status_change = hns3_link_status_change,
5928	.reset_notify = hns3_reset_notify,
5929	.process_hw_error = hns3_process_hw_error,
5932 /* hns3_init_module - Driver registration routine
5933  * hns3_init_module is the first routine called when the driver is
5934  * loaded. All it does is register with the PCI subsystem.
5936 static int __init hns3_init_module(void)
5940	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
5941	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
5943	client.type = HNAE3_CLIENT_KNIC;
5944	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s",
5947	client.ops = &client_ops;
5949	INIT_LIST_HEAD(&client.node);
5951	hns3_dbg_register_debugfs(hns3_driver_name);
5953	ret = hnae3_register_client(&client);
5955		goto err_reg_client;
5957	ret = pci_register_driver(&hns3_driver);
5959		goto err_reg_driver;
5964	hnae3_unregister_client(&client);
5966	hns3_dbg_unregister_debugfs();
5969 module_init(hns3_init_module);
5971 /* hns3_exit_module - Driver exit cleanup routine
5972  * hns3_exit_module is called just before the driver is removed
5975 static void __exit hns3_exit_module(void)
5977	pci_unregister_driver(&hns3_driver);
5978	hnae3_unregister_client(&client);
5979	hns3_dbg_unregister_debugfs();
5981 module_exit(hns3_exit_module);
5983 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
5984 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
5985 MODULE_LICENSE("GPL");
5986 MODULE_ALIAS("pci:hns-nic");