// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf.h"

/**
 * idpf_get_rxnfc - command to get RX flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 * @rule_locs: pointer to store rule locations
 *
 * Returns 0 if the command is supported, -EOPNOTSUPP otherwise.
 */
static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
                          u32 __always_unused *rule_locs)
{
        struct idpf_vport *vport;

        idpf_vport_ctrl_lock(netdev);
        vport = idpf_netdev_to_vport(netdev);

        switch (cmd->cmd) {
        case ETHTOOL_GRXRINGS:
                cmd->data = vport->num_rxq;
                idpf_vport_ctrl_unlock(netdev);

                return 0;
        default:
                break;
        }

        idpf_vport_ctrl_unlock(netdev);

        return -EOPNOTSUPP;
}
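
/*
 * Example (editorial, hedged): ETHTOOL_GRXRINGS is what lets userspace learn
 * the RX ring count, e.g. `ethtool -x <dev>` prints "RX flow hash indirection
 * table for <dev> with N RX ring(s)" where N comes from the cmd->data
 * assignment above.
 */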

/**
 * idpf_get_rxfh_key_size - get the RSS hash key size
 * @netdev: network interface device structure
 *
 * Returns the key size on success, error value on failure.
 */
static u32 idpf_get_rxfh_key_size(struct net_device *netdev)
{
        struct idpf_netdev_priv *np = netdev_priv(netdev);
        struct idpf_vport_user_config_data *user_config;

        if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
                return -EOPNOTSUPP;

        user_config = &np->adapter->vport_config[np->vport_idx]->user_config;

        return user_config->rss_data.rss_key_size;
}

/**
 * idpf_get_rxfh_indir_size - get the rx flow hash indirection table size
 * @netdev: network interface device structure
 *
 * Returns the table size on success, error value on failure.
 */
static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
{
        struct idpf_netdev_priv *np = netdev_priv(netdev);
        struct idpf_vport_user_config_data *user_config;

        if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
                return -EOPNOTSUPP;

        user_config = &np->adapter->vport_config[np->vport_idx]->user_config;

        return user_config->rss_data.rss_lut_size;
}

/**
 * idpf_get_rxfh - get the rx flow hash indirection table
 * @netdev: network interface device structure
 * @rxfh: pointer to param struct (indir, key, hfunc)
 *
 * Reads the indirection table and key from the driver's cached RSS
 * configuration. Returns 0 on success, -EOPNOTSUPP if RSS is not supported.
 */
static int idpf_get_rxfh(struct net_device *netdev,
                         struct ethtool_rxfh_param *rxfh)
{
        struct idpf_netdev_priv *np = netdev_priv(netdev);
        struct idpf_rss_data *rss_data;
        struct idpf_adapter *adapter;
        int err = 0;
        u16 i;

        idpf_vport_ctrl_lock(netdev);

        adapter = np->adapter;

        if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
                err = -EOPNOTSUPP;
                goto unlock_mutex;
        }

        rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data;
        if (np->state != __IDPF_VPORT_UP)
                goto unlock_mutex;

        rxfh->hfunc = ETH_RSS_HASH_TOP;

        if (rxfh->key)
                memcpy(rxfh->key, rss_data->rss_key, rss_data->rss_key_size);

        if (rxfh->indir) {
                for (i = 0; i < rss_data->rss_lut_size; i++)
                        rxfh->indir[i] = rss_data->rss_lut[i];
        }

unlock_mutex:
        idpf_vport_ctrl_unlock(netdev);

        return err;
}

/**
 * idpf_set_rxfh - set the rx flow hash indirection table
 * @netdev: network interface device structure
 * @rxfh: pointer to param struct (indir, key, hfunc)
 * @extack: extended ACK from the Netlink message
 *
 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
 * returns 0 after programming the table.
 */
static int idpf_set_rxfh(struct net_device *netdev,
                         struct ethtool_rxfh_param *rxfh,
                         struct netlink_ext_ack *extack)
{
        struct idpf_netdev_priv *np = netdev_priv(netdev);
        struct idpf_rss_data *rss_data;
        struct idpf_adapter *adapter;
        struct idpf_vport *vport;
        int err = 0;
        u16 lut;

        idpf_vport_ctrl_lock(netdev);
        vport = idpf_netdev_to_vport(netdev);

        adapter = vport->adapter;

        if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
                err = -EOPNOTSUPP;
                goto unlock_mutex;
        }

        rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
        if (np->state != __IDPF_VPORT_UP)
                goto unlock_mutex;

        if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
            rxfh->hfunc != ETH_RSS_HASH_TOP) {
                err = -EOPNOTSUPP;
                goto unlock_mutex;
        }

        if (rxfh->key)
                memcpy(rss_data->rss_key, rxfh->key, rss_data->rss_key_size);

        if (rxfh->indir) {
                for (lut = 0; lut < rss_data->rss_lut_size; lut++)
                        rss_data->rss_lut[lut] = rxfh->indir[lut];
        }

        err = idpf_config_rss(vport);

unlock_mutex:
        idpf_vport_ctrl_unlock(netdev);

        return err;
}
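
/*
 * Example (editorial, hedged): userspace drives the two callbacks above via
 * the standard ethtool RSS commands, e.g.
 *
 *      ethtool -x eth0              # dump the key and indirection table
 *      ethtool -X eth0 equal 4      # spread the LUT across queues 0-3
 *
 * "eth0" and the queue count are placeholders; each LUT entry written to
 * rss_data->rss_lut is simply an RX queue index.
 */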

/**
 * idpf_get_channels: get the number of channels supported by the device
 * @netdev: network interface device structure
 * @ch: channel information structure
 *
 * Report maximum of TX and RX. Report one extra channel to match our MailBox
 * Queue.
 */
static void idpf_get_channels(struct net_device *netdev,
                              struct ethtool_channels *ch)
{
        struct idpf_netdev_priv *np = netdev_priv(netdev);
        struct idpf_vport_config *vport_config;
        u16 num_txq, num_rxq;
        u16 combined;

        vport_config = np->adapter->vport_config[np->vport_idx];

        num_txq = vport_config->user_config.num_req_tx_qs;
        num_rxq = vport_config->user_config.num_req_rx_qs;

        combined = min(num_txq, num_rxq);

        /* Report maximum channels */
        ch->max_combined = min_t(u16, vport_config->max_q.max_txq,
                                 vport_config->max_q.max_rxq);
        ch->max_rx = vport_config->max_q.max_rxq;
        ch->max_tx = vport_config->max_q.max_txq;

        ch->max_other = IDPF_MAX_MBXQ;
        ch->other_count = IDPF_MAX_MBXQ;

        ch->combined_count = combined;
        ch->rx_count = num_rxq - combined;
        ch->tx_count = num_txq - combined;
}
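
/*
 * Worked example (editorial, hedged): with hypothetical requested counts of
 * num_txq = 8 and num_rxq = 4, combined = min(8, 4) = 4, so the function
 * reports combined_count = 4, tx_count = 4 and rx_count = 0, i.e. four
 * combined channels plus four dedicated TX channels.
 */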

/**
 * idpf_set_channels: set the new channel count
 * @netdev: network interface device structure
 * @ch: channel information structure
 *
 * Negotiate a new number of channels with CP. Returns 0 on success, negative
 * on failure.
 */
static int idpf_set_channels(struct net_device *netdev,
                             struct ethtool_channels *ch)
{
        struct idpf_vport_config *vport_config;
        unsigned int num_req_tx_q;
        unsigned int num_req_rx_q;
        struct idpf_vport *vport;
        u16 num_txq, num_rxq;
        struct device *dev;
        int err = 0;
        u16 idx;

        if (ch->rx_count && ch->tx_count) {
                netdev_err(netdev, "Dedicated RX or TX channels cannot be used simultaneously\n");
                return -EINVAL;
        }

        idpf_vport_ctrl_lock(netdev);
        vport = idpf_netdev_to_vport(netdev);

        idx = vport->idx;
        vport_config = vport->adapter->vport_config[idx];

        num_txq = vport_config->user_config.num_req_tx_qs;
        num_rxq = vport_config->user_config.num_req_rx_qs;

        num_req_tx_q = ch->combined_count + ch->tx_count;
        num_req_rx_q = ch->combined_count + ch->rx_count;

        dev = &vport->adapter->pdev->dev;
        /* It's possible to specify a number of queues that exceeds the max.
         * The stack checks the max combined_count and the max [tx|rx]_count,
         * but not combined_count + [tx|rx]_count. These checks catch that.
         */
        if (num_req_tx_q > vport_config->max_q.max_txq) {
                dev_info(dev, "Maximum TX queues is %d\n",
                         vport_config->max_q.max_txq);
                err = -EINVAL;
                goto unlock_mutex;
        }
        if (num_req_rx_q > vport_config->max_q.max_rxq) {
                dev_info(dev, "Maximum RX queues is %d\n",
                         vport_config->max_q.max_rxq);
                err = -EINVAL;
                goto unlock_mutex;
        }

        if (num_req_tx_q == num_txq && num_req_rx_q == num_rxq)
                goto unlock_mutex;

        vport_config->user_config.num_req_tx_qs = num_req_tx_q;
        vport_config->user_config.num_req_rx_qs = num_req_rx_q;

        err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
        if (err) {
                /* roll back queue change */
                vport_config->user_config.num_req_tx_qs = num_txq;
                vport_config->user_config.num_req_rx_qs = num_rxq;
        }

unlock_mutex:
        idpf_vport_ctrl_unlock(netdev);

        return err;
}
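
/*
 * Example (editorial, hedged): a request such as
 *
 *      ethtool -L eth0 combined 8
 *
 * arrives here as combined_count = 8, tx_count = 0, rx_count = 0, so
 * num_req_tx_q = num_req_rx_q = 8. A request that sets both rx and tx
 * (e.g. "combined 4 rx 2 tx 2") is rejected up front by the
 * rx_count && tx_count check. "eth0" is a placeholder.
 */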

/**
 * idpf_get_ringparam - Get ring parameters
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure
 * @kring: kernel-side ring parameters (carries the TCP data split state)
 * @ext_ack: unused
 *
 * Returns current ring parameters. TX and RX rings are reported separately,
 * but the number of rings is not reported.
 */
static void idpf_get_ringparam(struct net_device *netdev,
                               struct ethtool_ringparam *ring,
                               struct kernel_ethtool_ringparam *kring,
                               struct netlink_ext_ack *ext_ack)
{
        struct idpf_vport *vport;

        idpf_vport_ctrl_lock(netdev);
        vport = idpf_netdev_to_vport(netdev);

        ring->rx_max_pending = IDPF_MAX_RXQ_DESC;
        ring->tx_max_pending = IDPF_MAX_TXQ_DESC;
        ring->rx_pending = vport->rxq_desc_count;
        ring->tx_pending = vport->txq_desc_count;

        kring->tcp_data_split = idpf_vport_get_hsplit(vport);

        idpf_vport_ctrl_unlock(netdev);
}
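
/*
 * Example (editorial, hedged): a recent ethtool reports these values via
 *
 *      ethtool -g eth0
 *
 * and, since this driver advertises ETHTOOL_RING_USE_TCP_DATA_SPLIT in its
 * ops below, the output should also include the TCP data split state.
 * "eth0" is a placeholder.
 */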

/**
 * idpf_set_ringparam - Set ring parameters
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure
 * @kring: kernel-side ring parameters (carries the TCP data split setting)
 * @ext_ack: extended ACK, used to report a rejected TCP data split change
 *
 * Sets ring parameters. TX and RX rings are controlled separately, but the
 * number of rings is not specified, so all rings get the same settings.
 */
static int idpf_set_ringparam(struct net_device *netdev,
                              struct ethtool_ringparam *ring,
                              struct kernel_ethtool_ringparam *kring,
                              struct netlink_ext_ack *ext_ack)
{
        struct idpf_vport_user_config_data *config_data;
        u32 new_rx_count, new_tx_count;
        struct idpf_vport *vport;
        int i, err = 0;
        u16 idx;

        idpf_vport_ctrl_lock(netdev);
        vport = idpf_netdev_to_vport(netdev);

        idx = vport->idx;

        if (ring->tx_pending < IDPF_MIN_TXQ_DESC) {
                netdev_err(netdev, "Descriptors requested (Tx: %u) is less than min supported (%u)\n",
                           ring->tx_pending,
                           IDPF_MIN_TXQ_DESC);
                err = -EINVAL;
                goto unlock_mutex;
        }

        if (ring->rx_pending < IDPF_MIN_RXQ_DESC) {
                netdev_err(netdev, "Descriptors requested (Rx: %u) is less than min supported (%u)\n",
                           ring->rx_pending,
                           IDPF_MIN_RXQ_DESC);
                err = -EINVAL;
                goto unlock_mutex;
        }

        new_rx_count = ALIGN(ring->rx_pending, IDPF_REQ_RXQ_DESC_MULTIPLE);
        if (new_rx_count != ring->rx_pending)
                netdev_info(netdev, "Requested Rx descriptor count rounded up to %u\n",
                            new_rx_count);

        new_tx_count = ALIGN(ring->tx_pending, IDPF_REQ_DESC_MULTIPLE);
        if (new_tx_count != ring->tx_pending)
                netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n",
                            new_tx_count);

        if (new_tx_count == vport->txq_desc_count &&
            new_rx_count == vport->rxq_desc_count &&
            kring->tcp_data_split == idpf_vport_get_hsplit(vport))
                goto unlock_mutex;

        if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
                NL_SET_ERR_MSG_MOD(ext_ack,
                                   "setting TCP data split is not supported");
                err = -EOPNOTSUPP;

                goto unlock_mutex;
        }

        config_data = &vport->adapter->vport_config[idx]->user_config;
        config_data->num_req_txq_desc = new_tx_count;
        config_data->num_req_rxq_desc = new_rx_count;

        /* Since we adjusted the RX completion queue count, the RX buffer queue
         * descriptor count needs to be adjusted as well
         */
        for (i = 0; i < vport->num_bufqs_per_qgrp; i++)
                vport->bufq_desc_count[i] =
                        IDPF_RX_BUFQ_DESC_COUNT(new_rx_count,
                                                vport->num_bufqs_per_qgrp);

        err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_DESC_CHANGE);

unlock_mutex:
        idpf_vport_ctrl_unlock(netdev);

        return err;
}
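
/*
 * Worked example (editorial, hedged): assuming a descriptor multiple of 32,
 * a request of `ethtool -G eth0 rx 1000` is rounded by ALIGN(1000, 32) up to
 * 1024 and the rounding is logged. The real IDPF_REQ_*_DESC_MULTIPLE values
 * are driver constants; 32 and "eth0" here are illustrative placeholders.
 */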

/**
 * struct idpf_stats - definition for an ethtool statistic
 * @stat_string: statistic name to display in ethtool -S output
 * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
 * @stat_offset: offsetof() the stat from a base pointer
 *
 * This structure defines a statistic to be added to the ethtool stats buffer.
 * It defines a statistic as offset from a common base pointer. Stats should
 * be defined in constant arrays using the IDPF_STAT macro, with every element
 * of the array using the same _type for calculating the sizeof_stat and
 * stat_offset.
 *
 * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
 * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
 * the idpf_add_one_ethtool_stat() helper function.
 *
 * The @stat_string is interpreted as a format string, allowing formatted
 * values to be inserted while looping over multiple structures for a given
 * statistics array. Thus, every statistic string in an array should have the
 * same type and number of format specifiers, to be formatted by the
 * __idpf_add_qstat_strings() helper function.
 */
struct idpf_stats {
        char stat_string[ETH_GSTRING_LEN];
        int sizeof_stat;
        int stat_offset;
};

/* Helper macro to define an idpf_stat structure with proper size and type.
 * Use this when defining constant statistics arrays. Note that @_type expects
 * only a type name and is used multiple times.
 */
#define IDPF_STAT(_type, _name, _stat) { \
        .stat_string = _name, \
        .sizeof_stat = sizeof_field(_type, _stat), \
        .stat_offset = offsetof(_type, _stat) \
}
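
/*
 * Example (editorial, hedged): expanding the macro for one TX queue counter,
 *
 *      IDPF_STAT(struct idpf_queue, "pkts", q_stats.tx.packets)
 *
 * produces roughly:
 *
 *      {
 *              .stat_string = "pkts",
 *              .sizeof_stat = sizeof_field(struct idpf_queue,
 *                                          q_stats.tx.packets),
 *              .stat_offset = offsetof(struct idpf_queue,
 *                                      q_stats.tx.packets),
 *      }
 *
 * so readers only need a base pointer plus the recorded offset and size.
 */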

/* Helper macro for defining some statistics related to queues */
#define IDPF_QUEUE_STAT(_name, _stat) \
        IDPF_STAT(struct idpf_queue, _name, _stat)

/* Stats associated with a Tx queue */
static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = {
        IDPF_QUEUE_STAT("pkts", q_stats.tx.packets),
        IDPF_QUEUE_STAT("bytes", q_stats.tx.bytes),
        IDPF_QUEUE_STAT("lso_pkts", q_stats.tx.lso_pkts),
};

/* Stats associated with an Rx queue */
static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = {
        IDPF_QUEUE_STAT("pkts", q_stats.rx.packets),
        IDPF_QUEUE_STAT("bytes", q_stats.rx.bytes),
        IDPF_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rx.rsc_pkts),
};

#define IDPF_TX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_tx_queue_stats)
#define IDPF_RX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_rx_queue_stats)

#define IDPF_PORT_STAT(_name, _stat) \
        IDPF_STAT(struct idpf_vport, _name, _stat)

static const struct idpf_stats idpf_gstrings_port_stats[] = {
        IDPF_PORT_STAT("rx-csum_errors", port_stats.rx_hw_csum_err),
        IDPF_PORT_STAT("rx-hsplit", port_stats.rx_hsplit),
        IDPF_PORT_STAT("rx-hsplit_hbo", port_stats.rx_hsplit_hbo),
        IDPF_PORT_STAT("rx-bad_descs", port_stats.rx_bad_descs),
        IDPF_PORT_STAT("tx-skb_drops", port_stats.tx_drops),
        IDPF_PORT_STAT("tx-dma_map_errs", port_stats.tx_dma_map_errs),
        IDPF_PORT_STAT("tx-linearized_pkts", port_stats.tx_linearize),
        IDPF_PORT_STAT("tx-busy_events", port_stats.tx_busy),
        IDPF_PORT_STAT("rx-unicast_pkts", port_stats.vport_stats.rx_unicast),
        IDPF_PORT_STAT("rx-multicast_pkts", port_stats.vport_stats.rx_multicast),
        IDPF_PORT_STAT("rx-broadcast_pkts", port_stats.vport_stats.rx_broadcast),
        IDPF_PORT_STAT("rx-unknown_protocol", port_stats.vport_stats.rx_unknown_protocol),
        IDPF_PORT_STAT("tx-unicast_pkts", port_stats.vport_stats.tx_unicast),
        IDPF_PORT_STAT("tx-multicast_pkts", port_stats.vport_stats.tx_multicast),
        IDPF_PORT_STAT("tx-broadcast_pkts", port_stats.vport_stats.tx_broadcast),
};

#define IDPF_PORT_STATS_LEN ARRAY_SIZE(idpf_gstrings_port_stats)

/**
 * __idpf_add_qstat_strings - copy stat strings into ethtool buffer
 * @p: ethtool supplied buffer
 * @stats: stat definitions array
 * @size: size of the stats array
 * @type: stat type
 * @idx: stat index
 *
 * Format and copy the strings described by stats into the buffer pointed at
 * by p.
 */
static void __idpf_add_qstat_strings(u8 **p, const struct idpf_stats *stats,
                                     const unsigned int size, const char *type,
                                     unsigned int idx)
{
        unsigned int i;

        for (i = 0; i < size; i++)
                ethtool_sprintf(p, "%s_q-%u_%s",
                                type, idx, stats[i].stat_string);
}

/**
 * idpf_add_qstat_strings - Copy queue stat strings into ethtool buffer
 * @p: ethtool supplied buffer
 * @stats: stat definitions array
 * @type: stat type
 * @idx: stat idx
 *
 * Format and copy the strings described by the const static stats value into
 * the buffer pointed at by p.
 *
 * The parameter @stats is evaluated twice, so parameters with side effects
 * should be avoided. Additionally, stats must be an array such that
 * ARRAY_SIZE can be called on it.
 */
#define idpf_add_qstat_strings(p, stats, type, idx) \
        __idpf_add_qstat_strings(p, stats, ARRAY_SIZE(stats), type, idx)
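
/*
 * Example (editorial, hedged): for the TX array above,
 * idpf_add_qstat_strings(&data, idpf_gstrings_tx_queue_stats, "tx", 0)
 * emits the strings "tx_q-0_pkts", "tx_q-0_bytes" and "tx_q-0_lso_pkts",
 * which is how the per-queue names appear in `ethtool -S` output.
 */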

/**
 * idpf_add_stat_strings - Copy port stat strings into ethtool buffer
 * @p: ethtool buffer
 * @stats: struct to copy from
 * @size: size of stats array to copy from
 */
static void idpf_add_stat_strings(u8 **p, const struct idpf_stats *stats,
                                  const unsigned int size)
{
        unsigned int i;

        for (i = 0; i < size; i++)
                ethtool_puts(p, stats[i].stat_string);
}

/**
 * idpf_get_stat_strings - Get stat strings
 * @netdev: network interface device structure
 * @data: buffer for string data
 *
 * Builds the statistics string table
 */
static void idpf_get_stat_strings(struct net_device *netdev, u8 *data)
{
        struct idpf_netdev_priv *np = netdev_priv(netdev);
        struct idpf_vport_config *vport_config;
        unsigned int i;

        idpf_add_stat_strings(&data, idpf_gstrings_port_stats,
                              IDPF_PORT_STATS_LEN);

        vport_config = np->adapter->vport_config[np->vport_idx];
        /* It's critical that we always report a constant number of strings and
         * that the strings are reported in the same order regardless of how
         * many queues are actually in use.
         */
        for (i = 0; i < vport_config->max_q.max_txq; i++)
                idpf_add_qstat_strings(&data, idpf_gstrings_tx_queue_stats,
                                       "tx", i);

        for (i = 0; i < vport_config->max_q.max_rxq; i++)
                idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats,
                                       "rx", i);

        page_pool_ethtool_stats_get_strings(data);
}

/**
 * idpf_get_strings - Get string set
 * @netdev: network interface device structure
 * @sset: id of string set
 * @data: buffer for string data
 *
 * Builds string tables for various string sets
 */
static void idpf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
        switch (sset) {
        case ETH_SS_STATS:
                idpf_get_stat_strings(netdev, data);
                break;
        default:
                break;
        }
}

/**
 * idpf_get_sset_count - Get length of string set
 * @netdev: network interface device structure
 * @sset: id of string set
 *
 * Reports size of various string tables.
 */
static int idpf_get_sset_count(struct net_device *netdev, int sset)
{
        struct idpf_netdev_priv *np = netdev_priv(netdev);
        struct idpf_vport_config *vport_config;
        u16 max_txq, max_rxq;
        unsigned int size;

        if (sset != ETH_SS_STATS)
                return -EINVAL;

        vport_config = np->adapter->vport_config[np->vport_idx];
        /* This size reported back here *must* be constant throughout the
         * lifecycle of the netdevice, i.e. we must report the maximum length
         * even for queues that don't technically exist. This is due to the
         * fact that this userspace API uses three separate ioctl calls to get
         * stats data but has no way to communicate back to userspace when that
         * size has changed, which can typically happen as a result of changing
         * number of queues. If the number/order of stats change in the middle
         * of this call chain it will lead to userspace crashing/accessing bad
         * data through buffer under/overflow.
         */
        max_txq = vport_config->max_q.max_txq;
        max_rxq = vport_config->max_q.max_rxq;

        size = IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
               (IDPF_RX_QUEUE_STATS_LEN * max_rxq);
        size += page_pool_ethtool_stats_get_count();

        return size;
}
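
/*
 * Worked example (editorial, hedged): with hypothetical maximums of
 * max_txq = max_rxq = 16, the count is the 15 entries of
 * idpf_gstrings_port_stats above, plus 3 * 16 TX queue stats and
 * 3 * 16 RX queue stats (111 total), plus whatever
 * page_pool_ethtool_stats_get_count() returns for the page pool strings.
 */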

/**
 * idpf_add_one_ethtool_stat - copy the stat into the supplied buffer
 * @data: location to store the stat value
 * @pstat: old stat pointer to copy from
 * @stat: the stat definition
 *
 * Copies the stat data defined by the pointer and stat structure pair into
 * the memory supplied as data. If the pointer is null, data will be zero'd.
 */
static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
                                      const struct idpf_stats *stat)
{
        char *p;

        if (!pstat) {
                /* Ensure that the ethtool data buffer is zero'd for any stats
                 * which don't have a valid pointer.
                 */
                *data = 0;
                return;
        }

        p = (char *)pstat + stat->stat_offset;
        switch (stat->sizeof_stat) {
        case sizeof(u64):
                *data = *((u64 *)p);
                break;
        case sizeof(u32):
                *data = *((u32 *)p);
                break;
        case sizeof(u16):
                *data = *((u16 *)p);
                break;
        case sizeof(u8):
                *data = *((u8 *)p);
                break;
        default:
                WARN_ONCE(1, "unexpected stat size for %s",
                          stat->stat_string);
                *data = 0;
        }
}

/**
 * idpf_add_queue_stats - copy queue statistics into supplied buffer
 * @data: ethtool stats buffer
 * @q: the queue to copy
 *
 * Queue statistics must be copied while protected by u64_stats_fetch_begin,
 * so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats
 * are defined in idpf_gstrings_[rx|tx]_queue_stats. If the queue pointer is
 * null, zero out the queue stat values and update the data pointer. Otherwise
 * safely copy the stats from the queue into the supplied buffer and update
 * the data pointer when finished.
 *
 * This function expects to be called while under rcu_read_lock().
 */
static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q)
{
        const struct idpf_stats *stats;
        unsigned int start;
        unsigned int size;
        unsigned int i;

        if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
                size = IDPF_RX_QUEUE_STATS_LEN;
                stats = idpf_gstrings_rx_queue_stats;
        } else {
                size = IDPF_TX_QUEUE_STATS_LEN;
                stats = idpf_gstrings_tx_queue_stats;
        }

        /* To avoid invalid statistics values, ensure that we keep retrying
         * the copy until we get a consistent value according to
         * u64_stats_fetch_retry.
         */
        do {
                start = u64_stats_fetch_begin(&q->stats_sync);
                for (i = 0; i < size; i++)
                        idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]);
        } while (u64_stats_fetch_retry(&q->stats_sync, start));

        /* Once we successfully copy the stats in, update the data pointer */
        *data += size;
}
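
/*
 * Editorial note (hedged): the retry loop above is the reader side of the
 * u64_stats seqcount protocol. The matching writer side, as used by the
 * datapath and by idpf_collect_queue_stats() below, looks roughly like:
 *
 *      u64_stats_update_begin(&q->stats_sync);
 *      u64_stats_inc(&q->q_stats.tx.packets);
 *      u64_stats_update_end(&q->stats_sync);
 */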

/**
 * idpf_add_empty_queue_stats - Add stats for a non-existent queue
 * @data: pointer to data buffer
 * @qtype: type of data queue
 *
 * We must report a constant length of stats back to userspace regardless of
 * how many queues are actually in use because stats collection happens over
 * three separate ioctls and there's no way to notify userspace the size
 * changed between those calls. This adds empty stats to the data buffer
 * since we don't have a real queue to refer to for this stats slot.
 */
static void idpf_add_empty_queue_stats(u64 **data, u16 qtype)
{
        unsigned int i;
        int stats_len;

        if (qtype == VIRTCHNL2_QUEUE_TYPE_RX)
                stats_len = IDPF_RX_QUEUE_STATS_LEN;
        else
                stats_len = IDPF_TX_QUEUE_STATS_LEN;

        for (i = 0; i < stats_len; i++)
                (*data)[i] = 0;
        *data += stats_len;
}

/**
 * idpf_add_port_stats - Copy port stats into ethtool buffer
 * @vport: virtual port struct
 * @data: ethtool buffer to copy into
 */
static void idpf_add_port_stats(struct idpf_vport *vport, u64 **data)
{
        unsigned int size = IDPF_PORT_STATS_LEN;
        unsigned int start;
        unsigned int i;

        do {
                start = u64_stats_fetch_begin(&vport->port_stats.stats_sync);
                for (i = 0; i < size; i++)
                        idpf_add_one_ethtool_stat(&(*data)[i], vport,
                                                  &idpf_gstrings_port_stats[i]);
        } while (u64_stats_fetch_retry(&vport->port_stats.stats_sync, start));

        *data += size;
}

/**
 * idpf_collect_queue_stats - accumulate per queue stats into port level stats
 * @vport: pointer to vport struct
 */
static void idpf_collect_queue_stats(struct idpf_vport *vport)
{
        struct idpf_port_stats *pstats = &vport->port_stats;
        int i, j;

        /* zero out port stats since they're actually tracked in per
         * queue stats; this is only for reporting
         */
        u64_stats_update_begin(&pstats->stats_sync);
        u64_stats_set(&pstats->rx_hw_csum_err, 0);
        u64_stats_set(&pstats->rx_hsplit, 0);
        u64_stats_set(&pstats->rx_hsplit_hbo, 0);
        u64_stats_set(&pstats->rx_bad_descs, 0);
        u64_stats_set(&pstats->tx_linearize, 0);
        u64_stats_set(&pstats->tx_busy, 0);
        u64_stats_set(&pstats->tx_drops, 0);
        u64_stats_set(&pstats->tx_dma_map_errs, 0);
        u64_stats_update_end(&pstats->stats_sync);

        for (i = 0; i < vport->num_rxq_grp; i++) {
                struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
                u16 num_rxq;

                if (idpf_is_queue_model_split(vport->rxq_model))
                        num_rxq = rxq_grp->splitq.num_rxq_sets;
                else
                        num_rxq = rxq_grp->singleq.num_rxq;

                for (j = 0; j < num_rxq; j++) {
                        u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs;
                        struct idpf_rx_queue_stats *stats;
                        struct idpf_queue *rxq;
                        unsigned int start;

                        if (idpf_is_queue_model_split(vport->rxq_model))
                                rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
                        else
                                rxq = rxq_grp->singleq.rxqs[j];

                        if (!rxq)
                                continue;

                        do {
                                start = u64_stats_fetch_begin(&rxq->stats_sync);

                                stats = &rxq->q_stats.rx;
                                hw_csum_err = u64_stats_read(&stats->hw_csum_err);
                                hsplit = u64_stats_read(&stats->hsplit_pkts);
                                hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf);
                                bad_descs = u64_stats_read(&stats->bad_descs);
                        } while (u64_stats_fetch_retry(&rxq->stats_sync, start));

                        u64_stats_update_begin(&pstats->stats_sync);
                        u64_stats_add(&pstats->rx_hw_csum_err, hw_csum_err);
                        u64_stats_add(&pstats->rx_hsplit, hsplit);
                        u64_stats_add(&pstats->rx_hsplit_hbo, hsplit_hbo);
                        u64_stats_add(&pstats->rx_bad_descs, bad_descs);
                        u64_stats_update_end(&pstats->stats_sync);
                }
        }

        for (i = 0; i < vport->num_txq_grp; i++) {
                struct idpf_txq_group *txq_grp = &vport->txq_grps[i];

                for (j = 0; j < txq_grp->num_txq; j++) {
                        u64 linearize, qbusy, skb_drops, dma_map_errs;
                        struct idpf_queue *txq = txq_grp->txqs[j];
                        struct idpf_tx_queue_stats *stats;
                        unsigned int start;

                        if (!txq)
                                continue;

                        do {
                                start = u64_stats_fetch_begin(&txq->stats_sync);

                                stats = &txq->q_stats.tx;
                                linearize = u64_stats_read(&stats->linearize);
                                qbusy = u64_stats_read(&stats->q_busy);
                                skb_drops = u64_stats_read(&stats->skb_drops);
                                dma_map_errs = u64_stats_read(&stats->dma_map_errs);
                        } while (u64_stats_fetch_retry(&txq->stats_sync, start));

                        u64_stats_update_begin(&pstats->stats_sync);
                        u64_stats_add(&pstats->tx_linearize, linearize);
                        u64_stats_add(&pstats->tx_busy, qbusy);
                        u64_stats_add(&pstats->tx_drops, skb_drops);
                        u64_stats_add(&pstats->tx_dma_map_errs, dma_map_errs);
                        u64_stats_update_end(&pstats->stats_sync);
                }
        }
}

/**
 * idpf_get_ethtool_stats - report device statistics
 * @netdev: network interface device structure
 * @stats: ethtool statistics structure
 * @data: pointer to data buffer
 *
 * All statistics are added to the data buffer as an array of u64.
 */
static void idpf_get_ethtool_stats(struct net_device *netdev,
                                   struct ethtool_stats __always_unused *stats,
                                   u64 *data)
{
        struct idpf_netdev_priv *np = netdev_priv(netdev);
        struct idpf_vport_config *vport_config;
        struct page_pool_stats pp_stats = { };
        struct idpf_vport *vport;
        unsigned int total = 0;
        unsigned int i, j;
        bool is_splitq;
        u16 qtype;

        idpf_vport_ctrl_lock(netdev);
        vport = idpf_netdev_to_vport(netdev);

        if (np->state != __IDPF_VPORT_UP) {
                idpf_vport_ctrl_unlock(netdev);

                return;
        }

        rcu_read_lock();

        idpf_collect_queue_stats(vport);
        idpf_add_port_stats(vport, &data);

        for (i = 0; i < vport->num_txq_grp; i++) {
                struct idpf_txq_group *txq_grp = &vport->txq_grps[i];

                qtype = VIRTCHNL2_QUEUE_TYPE_TX;

                for (j = 0; j < txq_grp->num_txq; j++, total++) {
                        struct idpf_queue *txq = txq_grp->txqs[j];

                        if (!txq)
                                idpf_add_empty_queue_stats(&data, qtype);
                        else
                                idpf_add_queue_stats(&data, txq);
                }
        }

        vport_config = vport->adapter->vport_config[vport->idx];
        /* It is critical we provide a constant number of stats back to
         * userspace regardless of how many queues are actually in use because
         * there is no way to inform userspace the size has changed between
         * ioctl calls. This will fill in any missing stats with zero.
         */
        for (; total < vport_config->max_q.max_txq; total++)
                idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX);
        total = 0;

        is_splitq = idpf_is_queue_model_split(vport->rxq_model);

        for (i = 0; i < vport->num_rxq_grp; i++) {
                struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
                u16 num_rxq;

                qtype = VIRTCHNL2_QUEUE_TYPE_RX;

                if (is_splitq)
                        num_rxq = rxq_grp->splitq.num_rxq_sets;
                else
                        num_rxq = rxq_grp->singleq.num_rxq;

                for (j = 0; j < num_rxq; j++, total++) {
                        struct idpf_queue *rxq;

                        if (is_splitq)
                                rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
                        else
                                rxq = rxq_grp->singleq.rxqs[j];
                        if (!rxq)
                                idpf_add_empty_queue_stats(&data, qtype);
                        else
                                idpf_add_queue_stats(&data, rxq);

                        /* In splitq mode, don't get page pool stats here since
                         * the pools are attached to the buffer queues
                         */
                        if (is_splitq)
                                continue;

                        if (rxq)
                                page_pool_get_stats(rxq->pp, &pp_stats);
                }
        }

        for (i = 0; i < vport->num_rxq_grp; i++) {
                for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
                        struct idpf_queue *rxbufq =
                                &vport->rxq_grps[i].splitq.bufq_sets[j].bufq;

                        page_pool_get_stats(rxbufq->pp, &pp_stats);
                }
        }

        for (; total < vport_config->max_q.max_rxq; total++)
                idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);

        page_pool_ethtool_stats_get(data, &pp_stats);

        rcu_read_unlock();

        idpf_vport_ctrl_unlock(netdev);
}

/**
 * idpf_find_rxq - find rxq from q index
 * @vport: virtual port associated to queue
 * @q_num: q index used to find queue
 *
 * returns pointer to rx queue
 */
static struct idpf_queue *idpf_find_rxq(struct idpf_vport *vport, int q_num)
{
        int q_grp, q_idx;

        if (!idpf_is_queue_model_split(vport->rxq_model))
                return vport->rxq_grps->singleq.rxqs[q_num];

        q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
        q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;

        return &vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq;
}
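
/*
 * Worked example (editorial, hedged): in the split queue model, assuming an
 * illustrative IDPF_DFLT_SPLITQ_RXQ_PER_GROUP of 4, q_num = 6 resolves to
 * group 6 / 4 = 1 and index 6 % 4 = 2, i.e. the third RX queue set of the
 * second queue group.
 */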

/**
 * idpf_find_txq - find txq from q index
 * @vport: virtual port associated to queue
 * @q_num: q index used to find queue
 *
 * returns pointer to tx queue (the group's completion queue in the split
 * queue model)
 */
static struct idpf_queue *idpf_find_txq(struct idpf_vport *vport, int q_num)
{
        int q_grp;

        if (!idpf_is_queue_model_split(vport->txq_model))
                return vport->txqs[q_num];

        q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;

        return vport->txq_grps[q_grp].complq;
}

/**
 * __idpf_get_q_coalesce - get ITR values for specific queue
 * @ec: ethtool structure to fill with driver's coalesce settings
 * @q: queue of Rx or Tx
 */
static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec,
                                  struct idpf_queue *q)
{
        if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
                ec->use_adaptive_rx_coalesce =
                        IDPF_ITR_IS_DYNAMIC(q->q_vector->rx_intr_mode);
                ec->rx_coalesce_usecs = q->q_vector->rx_itr_value;
        } else {
                ec->use_adaptive_tx_coalesce =
                        IDPF_ITR_IS_DYNAMIC(q->q_vector->tx_intr_mode);
                ec->tx_coalesce_usecs = q->q_vector->tx_itr_value;
        }
}

/**
 * idpf_get_q_coalesce - get ITR values for specific queue
 * @netdev: pointer to the netdev associated with this query
 * @ec: coalesce settings to be filled
 * @q_num: queue number/index for which to retrieve ITR/INTRL (coalesce)
 *	   settings
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_get_q_coalesce(struct net_device *netdev,
                               struct ethtool_coalesce *ec,
                               u32 q_num)
{
        struct idpf_netdev_priv *np = netdev_priv(netdev);
        struct idpf_vport *vport;
        int err = 0;

        idpf_vport_ctrl_lock(netdev);
        vport = idpf_netdev_to_vport(netdev);

        if (np->state != __IDPF_VPORT_UP)
                goto unlock_mutex;

        if (q_num >= vport->num_rxq && q_num >= vport->num_txq) {
                err = -EINVAL;
                goto unlock_mutex;
        }

        if (q_num < vport->num_rxq)
                __idpf_get_q_coalesce(ec, idpf_find_rxq(vport, q_num));

        if (q_num < vport->num_txq)
                __idpf_get_q_coalesce(ec, idpf_find_txq(vport, q_num));

unlock_mutex:
        idpf_vport_ctrl_unlock(netdev);

        return err;
}

/**
 * idpf_get_coalesce - get ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @ec: coalesce settings to be filled
 * @kec: unused
 * @extack: unused
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_get_coalesce(struct net_device *netdev,
                             struct ethtool_coalesce *ec,
                             struct kernel_ethtool_coalesce *kec,
                             struct netlink_ext_ack *extack)
{
        /* Return coalesce based on queue number zero */
        return idpf_get_q_coalesce(netdev, ec, 0);
}
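
/*
 * Example (editorial, hedged): `ethtool -c eth0` therefore reflects the ITR
 * settings of queue 0 only, while recent ethtool can query an individual
 * queue through the per-queue interface, e.g.
 * `ethtool --per-queue eth0 queue_mask 0x2 --show-coalesce` for queue 1.
 * "eth0" is a placeholder.
 */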

/**
 * idpf_get_per_q_coalesce - get ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @q_num: queue for which the ITR values have to be retrieved
 * @ec: coalesce settings to be filled
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
                                   struct ethtool_coalesce *ec)
{
        return idpf_get_q_coalesce(netdev, ec, q_num);
}

/**
 * __idpf_set_q_coalesce - set ITR values for specific queue
 * @ec: ethtool structure from user to update ITR settings
 * @q: queue for which ITR values have to be set
 * @is_rxq: is queue type rx
 *
 * Returns 0 on success, negative otherwise.
 */
static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
                                 struct idpf_queue *q, bool is_rxq)
{
        u32 use_adaptive_coalesce, coalesce_usecs;
        struct idpf_q_vector *qv = q->q_vector;
        bool is_dim_ena = false;
        u16 itr_val;

        if (is_rxq) {
                is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
                use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
                coalesce_usecs = ec->rx_coalesce_usecs;
                itr_val = qv->rx_itr_value;
        } else {
                is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
                use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
                coalesce_usecs = ec->tx_coalesce_usecs;
                itr_val = qv->tx_itr_value;
        }
        if (coalesce_usecs != itr_val && use_adaptive_coalesce) {
                netdev_err(q->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n");

                return -EINVAL;
        }

        if (is_dim_ena && use_adaptive_coalesce)
                return 0;

        if (coalesce_usecs > IDPF_ITR_MAX) {
                netdev_err(q->vport->netdev,
                           "Invalid value, %d-usecs range is 0-%d\n",
                           coalesce_usecs, IDPF_ITR_MAX);

                return -EINVAL;
        }

        if (coalesce_usecs % 2) {
                coalesce_usecs--;
                netdev_info(q->vport->netdev,
                            "HW only supports even ITR values, ITR rounded to %d\n",
                            coalesce_usecs);
        }

        if (is_rxq) {
                qv->rx_itr_value = coalesce_usecs;
                if (use_adaptive_coalesce) {
                        qv->rx_intr_mode = IDPF_ITR_DYNAMIC;
                } else {
                        qv->rx_intr_mode = !IDPF_ITR_DYNAMIC;
                        idpf_vport_intr_write_itr(qv, qv->rx_itr_value,
                                                  false);
                }
        } else {
                qv->tx_itr_value = coalesce_usecs;
                if (use_adaptive_coalesce) {
                        qv->tx_intr_mode = IDPF_ITR_DYNAMIC;
                } else {
                        qv->tx_intr_mode = !IDPF_ITR_DYNAMIC;
                        idpf_vport_intr_write_itr(qv, qv->tx_itr_value, true);
                }
        }

        /* Update of static/dynamic ITR will be taken care of when the
         * interrupt fires
         */
        return 0;
}
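
/*
 * Worked example (editorial, hedged): with adaptive RX coalescing off,
 * `ethtool -C eth0 rx-usecs 33` is rounded down to 32 because the hardware
 * only accepts even ITR values; requesting a new rx-usecs value while
 * adaptive-rx is enabled fails with -EINVAL per the check above. "eth0" is
 * a placeholder.
 */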

/**
 * idpf_set_q_coalesce - set ITR values for specific queue
 * @vport: vport associated to the queue that needs updating
 * @ec: coalesce settings to program the device with
 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
 * @is_rxq: is queue type rx
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_set_q_coalesce(struct idpf_vport *vport,
                               struct ethtool_coalesce *ec,
                               int q_num, bool is_rxq)
{
        struct idpf_queue *q;

        q = is_rxq ? idpf_find_rxq(vport, q_num) : idpf_find_txq(vport, q_num);

        if (q && __idpf_set_q_coalesce(ec, q, is_rxq))
                return -EINVAL;

        return 0;
}

/**
 * idpf_set_coalesce - set ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @ec: coalesce settings to program the device with
 * @kec: unused
 * @extack: unused
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_set_coalesce(struct net_device *netdev,
                             struct ethtool_coalesce *ec,
                             struct kernel_ethtool_coalesce *kec,
                             struct netlink_ext_ack *extack)
{
        struct idpf_netdev_priv *np = netdev_priv(netdev);
        struct idpf_vport *vport;
        int i, err = 0;

        idpf_vport_ctrl_lock(netdev);
        vport = idpf_netdev_to_vport(netdev);

        if (np->state != __IDPF_VPORT_UP)
                goto unlock_mutex;

        for (i = 0; i < vport->num_txq; i++) {
                err = idpf_set_q_coalesce(vport, ec, i, false);
                if (err)
                        goto unlock_mutex;
        }

        for (i = 0; i < vport->num_rxq; i++) {
                err = idpf_set_q_coalesce(vport, ec, i, true);
                if (err)
                        goto unlock_mutex;
        }

unlock_mutex:
        idpf_vport_ctrl_unlock(netdev);

        return err;
}

/**
 * idpf_set_per_q_coalesce - set ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @q_num: queue for which the ITR values have to be set
 * @ec: coalesce settings to program the device with
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
                                   struct ethtool_coalesce *ec)
{
        struct idpf_vport *vport;
        int err;

        idpf_vport_ctrl_lock(netdev);
        vport = idpf_netdev_to_vport(netdev);

        err = idpf_set_q_coalesce(vport, ec, q_num, false);
        if (err) {
                idpf_vport_ctrl_unlock(netdev);

                return err;
        }

        err = idpf_set_q_coalesce(vport, ec, q_num, true);

        idpf_vport_ctrl_unlock(netdev);

        return err;
}

/**
 * idpf_get_msglevel - Get debug message level
 * @netdev: network interface device structure
 *
 * Returns current debug message level.
 */
static u32 idpf_get_msglevel(struct net_device *netdev)
{
        struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);

        return adapter->msg_enable;
}

/**
 * idpf_set_msglevel - Set debug message level
 * @netdev: network interface device structure
 * @data: message level
 *
 * Set current debug message level. Higher values cause the driver to
 * be noisier.
 */
static void idpf_set_msglevel(struct net_device *netdev, u32 data)
{
        struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);

        adapter->msg_enable = data;
}

/**
 * idpf_get_link_ksettings - Get Link Speed and Duplex settings
 * @netdev: network interface device structure
 * @cmd: ethtool command
 *
 * Reports speed/duplex settings.
 */
static int idpf_get_link_ksettings(struct net_device *netdev,
                                   struct ethtool_link_ksettings *cmd)
{
        struct idpf_vport *vport;

        idpf_vport_ctrl_lock(netdev);
        vport = idpf_netdev_to_vport(netdev);

        ethtool_link_ksettings_zero_link_mode(cmd, supported);
        cmd->base.autoneg = AUTONEG_DISABLE;
        cmd->base.port = PORT_NONE;
        if (vport->link_up) {
                cmd->base.duplex = DUPLEX_FULL;
                cmd->base.speed = vport->link_speed_mbps;
        } else {
                cmd->base.duplex = DUPLEX_UNKNOWN;
                cmd->base.speed = SPEED_UNKNOWN;
        }

        idpf_vport_ctrl_unlock(netdev);

        return 0;
}

static const struct ethtool_ops idpf_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
                                     ETHTOOL_COALESCE_USE_ADAPTIVE,
        .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
        .get_msglevel = idpf_get_msglevel,
        .set_msglevel = idpf_set_msglevel,
        .get_link = ethtool_op_get_link,
        .get_coalesce = idpf_get_coalesce,
        .set_coalesce = idpf_set_coalesce,
        .get_per_queue_coalesce = idpf_get_per_q_coalesce,
        .set_per_queue_coalesce = idpf_set_per_q_coalesce,
        .get_ethtool_stats = idpf_get_ethtool_stats,
        .get_strings = idpf_get_strings,
        .get_sset_count = idpf_get_sset_count,
        .get_channels = idpf_get_channels,
        .get_rxnfc = idpf_get_rxnfc,
        .get_rxfh_key_size = idpf_get_rxfh_key_size,
        .get_rxfh_indir_size = idpf_get_rxfh_indir_size,
        .get_rxfh = idpf_get_rxfh,
        .set_rxfh = idpf_set_rxfh,
        .set_channels = idpf_set_channels,
        .get_ringparam = idpf_get_ringparam,
        .set_ringparam = idpf_set_ringparam,
        .get_link_ksettings = idpf_get_link_ksettings,
};

/**
 * idpf_set_ethtool_ops - Initialize ethtool ops struct
 * @netdev: network interface device structure
 *
 * Sets ethtool ops struct in our netdev so that ethtool can call
 * our functions.
 */
void idpf_set_ethtool_ops(struct net_device *netdev)
{
        netdev->ethtool_ops = &idpf_ethtool_ops;
}