]> git.ipfire.org Git - people/ms/linux.git/blame - drivers/net/ethernet/intel/ice/ice_main.c
Merge tag 'at91-fixes-6.0-2' of https://git.kernel.org/pub/scm/linux/kernel/git/at91...
[people/ms/linux.git] / drivers / net / ethernet / intel / ice / ice_main.c
CommitLineData
837f08fd
AV
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Intel Corporation. */
3
4/* Intel(R) Ethernet Connection E800 Series Linux Driver */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
34a2a3b8 8#include <generated/utsrelease.h>
837f08fd 9#include "ice.h"
eff380aa 10#include "ice_base.h"
45d3d428 11#include "ice_lib.h"
1b8f15b6 12#include "ice_fltr.h"
37b6f646 13#include "ice_dcb_lib.h"
b94b013e 14#include "ice_dcb_nl.h"
1adf7ead 15#include "ice_devlink.h"
3089cf6d
JB
16/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
17 * ice tracepoint functions. This must be done exactly once across the
18 * ice driver.
19 */
20#define CREATE_TRACE_POINTS
21#include "ice_trace.h"
b3be918d 22#include "ice_eswitch.h"
0d08a441 23#include "ice_tc_lib.h"
c31af68a 24#include "ice_vsi_vlan_ops.h"
837f08fd 25
837f08fd 26#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
837f08fd
AV
27static const char ice_driver_string[] = DRV_SUMMARY;
28static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
29
462acf6a
TN
30/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
31#define ICE_DDP_PKG_PATH "intel/ice/ddp/"
32#define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg"
33
837f08fd
AV
34MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
35MODULE_DESCRIPTION(DRV_SUMMARY);
98674ebe 36MODULE_LICENSE("GPL v2");
462acf6a 37MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
837f08fd
AV
38
39static int debug = -1;
40module_param(debug, int, 0644);
7ec59eea
AV
41#ifndef CONFIG_DYNAMIC_DEBUG
42MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
43#else
44MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
45#endif /* !CONFIG_DYNAMIC_DEBUG */
837f08fd 46
d25a0fc4 47static DEFINE_IDA(ice_aux_ida);
22bf877e
MF
48DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
49EXPORT_SYMBOL(ice_xdp_locking_key);
d25a0fc4 50
649c87c6
JK
51/**
52 * ice_hw_to_dev - Get device pointer from the hardware structure
53 * @hw: pointer to the device HW structure
54 *
55 * Used to access the device pointer from compilation units which can't easily
56 * include the definition of struct ice_pf without leading to circular header
57 * dependencies.
58 */
59struct device *ice_hw_to_dev(struct ice_hw *hw)
60{
61 struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
62
63 return &pf->pdev->dev;
64}
65
940b61af 66static struct workqueue_struct *ice_wq;
462acf6a 67static const struct net_device_ops ice_netdev_safe_mode_ops;
cdedef59 68static const struct net_device_ops ice_netdev_ops;
940b61af 69
462acf6a 70static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
28c2a645 71
0f9d5027 72static void ice_vsi_release_all(struct ice_pf *pf);
3a858ba3 73
fbc7b27a
KP
74static int ice_rebuild_channels(struct ice_pf *pf);
75static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
76
195bb48f
MS
77static int
78ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
79 void *cb_priv, enum tc_setup_type type, void *type_data,
80 void *data,
81 void (*cleanup)(struct flow_block_cb *block_cb));
82
df006dd4
DE
83bool netif_is_ice(struct net_device *dev)
84{
85 return dev && (dev->netdev_ops == &ice_netdev_ops);
86}
87
b3969fd7
SM
88/**
89 * ice_get_tx_pending - returns number of Tx descriptors not processed
90 * @ring: the ring of descriptors
91 */
e72bba21 92static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
b3969fd7 93{
c1ddf1f5 94 u16 head, tail;
b3969fd7
SM
95
96 head = ring->next_to_clean;
c1ddf1f5 97 tail = ring->next_to_use;
b3969fd7
SM
98
99 if (head != tail)
100 return (head < tail) ?
101 tail - head : (tail + ring->count - head);
102 return 0;
103}
104
105/**
106 * ice_check_for_hang_subtask - check for and recover hung queues
107 * @pf: pointer to PF struct
108 */
109static void ice_check_for_hang_subtask(struct ice_pf *pf)
110{
111 struct ice_vsi *vsi = NULL;
e89e899f 112 struct ice_hw *hw;
b3969fd7 113 unsigned int i;
b3969fd7 114 int packets;
e89e899f 115 u32 v;
b3969fd7
SM
116
117 ice_for_each_vsi(pf, v)
118 if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
119 vsi = pf->vsi[v];
120 break;
121 }
122
e97fb1ae 123 if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
b3969fd7
SM
124 return;
125
126 if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
127 return;
128
e89e899f
BC
129 hw = &vsi->back->hw;
130
2faf63b6 131 ice_for_each_txq(vsi, i) {
e72bba21 132 struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
b3969fd7 133
fbc7b27a
KP
134 if (!tx_ring)
135 continue;
136 if (ice_ring_ch_enabled(tx_ring))
137 continue;
138
139 if (tx_ring->desc) {
b3969fd7
SM
140 /* If packet counter has not changed the queue is
141 * likely stalled, so force an interrupt for this
142 * queue.
143 *
144 * prev_pkt would be negative if there was no
145 * pending work.
146 */
147 packets = tx_ring->stats.pkts & INT_MAX;
148 if (tx_ring->tx_stats.prev_pkt == packets) {
149 /* Trigger sw interrupt to revive the queue */
e89e899f 150 ice_trigger_sw_intr(hw, tx_ring->q_vector);
b3969fd7
SM
151 continue;
152 }
153
154 /* Memory barrier between read of packet count and call
155 * to ice_get_tx_pending()
156 */
157 smp_rmb();
158 tx_ring->tx_stats.prev_pkt =
159 ice_get_tx_pending(tx_ring) ? packets : -1;
160 }
161 }
162}
163
561f4379
TN
164/**
165 * ice_init_mac_fltr - Set initial MAC filters
166 * @pf: board private structure
167 *
2f2da36e 168 * Set initial set of MAC filters for PF VSI; configure filters for permanent
561f4379
TN
169 * address and broadcast address. If an error is encountered, netdevice will be
170 * unregistered.
171 */
172static int ice_init_mac_fltr(struct ice_pf *pf)
173{
561f4379 174 struct ice_vsi *vsi;
1b8f15b6 175 u8 *perm_addr;
561f4379 176
208ff751 177 vsi = ice_get_main_vsi(pf);
561f4379
TN
178 if (!vsi)
179 return -EINVAL;
180
1b8f15b6 181 perm_addr = vsi->port_info->mac.perm_addr;
c1484691 182 return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
561f4379
TN
183}
184
e94d4478 185/**
f9867df6 186 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
e94d4478 187 * @netdev: the net device on which the sync is happening
f9867df6 188 * @addr: MAC address to sync
e94d4478
AV
189 *
190 * This is a callback function which is called by the in kernel device sync
191 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
192 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
f9867df6 193 * MAC filters from the hardware.
e94d4478
AV
194 */
195static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
196{
197 struct ice_netdev_priv *np = netdev_priv(netdev);
198 struct ice_vsi *vsi = np->vsi;
199
1b8f15b6
MS
200 if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
201 ICE_FWD_TO_VSI))
e94d4478
AV
202 return -EINVAL;
203
204 return 0;
205}
206
207/**
f9867df6 208 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
e94d4478 209 * @netdev: the net device on which the unsync is happening
f9867df6 210 * @addr: MAC address to unsync
e94d4478
AV
211 *
212 * This is a callback function which is called by the in kernel device unsync
213 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
214 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
f9867df6 215 * delete the MAC filters from the hardware.
e94d4478
AV
216 */
217static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
218{
219 struct ice_netdev_priv *np = netdev_priv(netdev);
220 struct ice_vsi *vsi = np->vsi;
221
3ba7f53f
BC
222 /* Under some circumstances, we might receive a request to delete our
223 * own device address from our uc list. Because we store the device
224 * address in the VSI's MAC filter list, we need to ignore such
225 * requests and not delete our device address from this list.
226 */
227 if (ether_addr_equal(addr, netdev->dev_addr))
228 return 0;
229
1b8f15b6
MS
230 if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
231 ICE_FWD_TO_VSI))
e94d4478
AV
232 return -EINVAL;
233
234 return 0;
235}
236
e94d4478
AV
237/**
238 * ice_vsi_fltr_changed - check if filter state changed
239 * @vsi: VSI to be checked
240 *
241 * returns true if filter state has changed, false otherwise.
242 */
243static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
244{
e97fb1ae 245 return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
1273f895 246 test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
e94d4478
AV
247}
248
5eda8afd 249/**
fabf480b 250 * ice_set_promisc - Enable promiscuous mode for a given PF
5eda8afd
AA
251 * @vsi: the VSI being configured
252 * @promisc_m: mask of promiscuous config bits
5eda8afd
AA
253 *
254 */
fabf480b 255static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
5eda8afd 256{
5e24d598 257 int status;
5eda8afd
AA
258
259 if (vsi->type != ICE_VSI_PF)
260 return 0;
261
1273f895
IV
262 if (ice_vsi_has_non_zero_vlans(vsi)) {
263 promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
264 status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
265 promisc_m);
266 } else {
267 status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
268 promisc_m, 0);
269 }
abddafd4
GS
270 if (status && status != -EEXIST)
271 return status;
1273f895 272
abddafd4 273 return 0;
fabf480b
BC
274}
275
276/**
277 * ice_clear_promisc - Disable promiscuous mode for a given PF
278 * @vsi: the VSI being configured
279 * @promisc_m: mask of promiscuous config bits
280 *
281 */
282static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
283{
5e24d598 284 int status;
fabf480b
BC
285
286 if (vsi->type != ICE_VSI_PF)
287 return 0;
5eda8afd 288
1273f895
IV
289 if (ice_vsi_has_non_zero_vlans(vsi)) {
290 promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
291 status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
292 promisc_m);
293 } else {
294 status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
295 promisc_m, 0);
296 }
297
c1484691 298 return status;
5eda8afd
AA
299}
300
4b889474
MS
301/**
302 * ice_get_devlink_port - Get devlink port from netdev
303 * @netdev: the netdevice structure
304 */
305static struct devlink_port *ice_get_devlink_port(struct net_device *netdev)
306{
307 struct ice_pf *pf = ice_netdev_to_pf(netdev);
308
309 if (!ice_is_switchdev_running(pf))
310 return NULL;
311
312 return &pf->devlink_port;
313}
314
e94d4478
AV
315/**
316 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
317 * @vsi: ptr to the VSI
318 *
319 * Push any outstanding VSI filter changes through the AdminQ.
320 */
321static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
322{
c31af68a 323 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
9a946843 324 struct device *dev = ice_pf_to_dev(vsi->back);
e94d4478
AV
325 struct net_device *netdev = vsi->netdev;
326 bool promisc_forced_on = false;
327 struct ice_pf *pf = vsi->back;
328 struct ice_hw *hw = &pf->hw;
e94d4478 329 u32 changed_flags = 0;
2ccc1c1c 330 int err;
e94d4478
AV
331
332 if (!vsi->netdev)
333 return -EINVAL;
334
7e408e07 335 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
e94d4478
AV
336 usleep_range(1000, 2000);
337
338 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
339 vsi->current_netdev_flags = vsi->netdev->flags;
340
341 INIT_LIST_HEAD(&vsi->tmp_sync_list);
342 INIT_LIST_HEAD(&vsi->tmp_unsync_list);
343
344 if (ice_vsi_fltr_changed(vsi)) {
e97fb1ae
AV
345 clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
346 clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
e94d4478
AV
347
348 /* grab the netdev's addr_list_lock */
349 netif_addr_lock_bh(netdev);
350 __dev_uc_sync(netdev, ice_add_mac_to_sync_list,
351 ice_add_mac_to_unsync_list);
352 __dev_mc_sync(netdev, ice_add_mac_to_sync_list,
353 ice_add_mac_to_unsync_list);
354 /* our temp lists are populated. release lock */
355 netif_addr_unlock_bh(netdev);
356 }
357
f9867df6 358 /* Remove MAC addresses in the unsync list */
2ccc1c1c 359 err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
1b8f15b6 360 ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
2ccc1c1c 361 if (err) {
e94d4478
AV
362 netdev_err(netdev, "Failed to delete MAC filters\n");
363 /* if we failed because of alloc failures, just bail */
2ccc1c1c 364 if (err == -ENOMEM)
e94d4478 365 goto out;
e94d4478
AV
366 }
367
f9867df6 368 /* Add MAC addresses in the sync list */
2ccc1c1c 369 err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
1b8f15b6 370 ice_fltr_free_list(dev, &vsi->tmp_sync_list);
89f3e4a5
PB
371 /* If filter is added successfully or already exists, do not go into
372 * 'if' condition and report it as error. Instead continue processing
373 * rest of the function.
374 */
2ccc1c1c 375 if (err && err != -EEXIST) {
e94d4478 376 netdev_err(netdev, "Failed to add MAC filters\n");
f9867df6 377 /* If there is no more space for new umac filters, VSI
e94d4478
AV
378 * should go into promiscuous mode. There should be some
379 * space reserved for promiscuous filters.
380 */
381 if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
7e408e07 382 !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
e94d4478
AV
383 vsi->state)) {
384 promisc_forced_on = true;
19cce2c6 385 netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
e94d4478
AV
386 vsi->vsi_num);
387 } else {
e94d4478
AV
388 goto out;
389 }
390 }
2ccc1c1c 391 err = 0;
e94d4478 392 /* check for changes in promiscuous modes */
5eda8afd
AA
393 if (changed_flags & IFF_ALLMULTI) {
394 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
1273f895 395 err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
5eda8afd 396 if (err) {
5eda8afd
AA
397 vsi->current_netdev_flags &= ~IFF_ALLMULTI;
398 goto out_promisc;
399 }
92ace482
BA
400 } else {
401 /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
1273f895 402 err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
5eda8afd 403 if (err) {
5eda8afd
AA
404 vsi->current_netdev_flags |= IFF_ALLMULTI;
405 goto out_promisc;
406 }
407 }
408 }
e94d4478
AV
409
410 if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
e97fb1ae
AV
411 test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
412 clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
e94d4478 413 if (vsi->current_netdev_flags & IFF_PROMISC) {
f9867df6 414 /* Apply Rx filter rule to get traffic from wire */
d7393425
MW
415 if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
416 err = ice_set_dflt_vsi(vsi);
fc0f39bc 417 if (err && err != -EEXIST) {
19cce2c6 418 netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
fc0f39bc
BC
419 err, vsi->vsi_num);
420 vsi->current_netdev_flags &=
421 ~IFF_PROMISC;
422 goto out_promisc;
423 }
2ccc1c1c 424 err = 0;
c31af68a 425 vlan_ops->dis_rx_filtering(vsi);
e94d4478
AV
426 }
427 } else {
f9867df6 428 /* Clear Rx filter to remove traffic from wire */
d7393425
MW
429 if (ice_is_vsi_dflt_vsi(vsi)) {
430 err = ice_clear_dflt_vsi(vsi);
fc0f39bc 431 if (err) {
19cce2c6 432 netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
fc0f39bc
BC
433 err, vsi->vsi_num);
434 vsi->current_netdev_flags |=
435 IFF_PROMISC;
436 goto out_promisc;
437 }
7dc839fe 438 if (vsi->netdev->features &
1babaf77 439 NETIF_F_HW_VLAN_CTAG_FILTER)
c31af68a 440 vlan_ops->ena_rx_filtering(vsi);
e94d4478
AV
441 }
442 }
443 }
444 goto exit;
445
446out_promisc:
e97fb1ae 447 set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
e94d4478
AV
448 goto exit;
449out:
450 /* if something went wrong then set the changed flag so we try again */
e97fb1ae
AV
451 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
452 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
e94d4478 453exit:
7e408e07 454 clear_bit(ICE_CFG_BUSY, vsi->state);
e94d4478
AV
455 return err;
456}
457
458/**
459 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
460 * @pf: board private structure
461 */
462static void ice_sync_fltr_subtask(struct ice_pf *pf)
463{
464 int v;
465
466 if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
467 return;
468
469 clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
470
80ed404a 471 ice_for_each_vsi(pf, v)
e94d4478
AV
472 if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
473 ice_vsi_sync_fltr(pf->vsi[v])) {
474 /* come back and try again later */
475 set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
476 break;
477 }
478}
479
7b9ffc76
AV
480/**
481 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
482 * @pf: the PF
483 * @locked: is the rtnl_lock already held
484 */
7b9ffc76 485static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
7b9ffc76 486{
b126bd6b 487 int node;
7b9ffc76
AV
488 int v;
489
490 ice_for_each_vsi(pf, v)
491 if (pf->vsi[v])
492 ice_dis_vsi(pf->vsi[v], locked);
b126bd6b
KP
493
494 for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
495 pf->pf_agg_node[node].num_vsis = 0;
496
497 for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
498 pf->vf_agg_node[node].num_vsis = 0;
7b9ffc76
AV
499}
500
c1e5da5d
WD
501/**
502 * ice_clear_sw_switch_recipes - clear switch recipes
503 * @pf: board private structure
504 *
505 * Mark switch recipes as not created in sw structures. There are cases where
506 * rules (especially advanced rules) need to be restored, either re-read from
507 * hardware or added again. For example after the reset. 'recp_created' flag
508 * prevents from doing that and need to be cleared upfront.
509 */
510static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
511{
512 struct ice_sw_recipe *recp;
513 u8 i;
514
515 recp = pf->hw.switch_info->recp_list;
516 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
517 recp[i].recp_created = false;
518}
519
0b28b702 520/**
fbc7b27a 521 * ice_prepare_for_reset - prep for reset
0b28b702 522 * @pf: board private structure
fbc7b27a 523 * @reset_type: reset type requested
0b28b702
AV
524 *
525 * Inform or close all dependent features in prep for reset.
526 */
527static void
fbc7b27a 528ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
0b28b702
AV
529{
530 struct ice_hw *hw = &pf->hw;
fbc7b27a 531 struct ice_vsi *vsi;
c4c2c7db
JK
532 struct ice_vf *vf;
533 unsigned int bkt;
0b28b702 534
fbc7b27a
KP
535 dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
536
5abac9d7 537 /* already prepared for reset */
7e408e07 538 if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
5abac9d7
BC
539 return;
540
f9f5301e
DE
541 ice_unplug_aux_dev(pf);
542
007676b4
AV
543 /* Notify VFs of impending reset */
544 if (ice_check_sq_alive(hw, &hw->mailboxq))
545 ice_vc_notify_reset(pf);
546
c7aeb4d1 547 /* Disable VFs until reset is completed */
3d5985a1 548 mutex_lock(&pf->vfs.table_lock);
c4c2c7db
JK
549 ice_for_each_vf(pf, bkt, vf)
550 ice_set_vf_state_qs_dis(vf);
3d5985a1 551 mutex_unlock(&pf->vfs.table_lock);
c7aeb4d1 552
c1e5da5d
WD
553 if (ice_is_eswitch_mode_switchdev(pf)) {
554 if (reset_type != ICE_RESET_PFR)
555 ice_clear_sw_switch_recipes(pf);
556 }
557
fbc7b27a
KP
558 /* release ADQ specific HW and SW resources */
559 vsi = ice_get_main_vsi(pf);
560 if (!vsi)
561 goto skip;
562
563 /* to be on safe side, reset orig_rss_size so that normal flow
564 * of deciding rss_size can take precedence
565 */
566 vsi->orig_rss_size = 0;
567
568 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
569 if (reset_type == ICE_RESET_PFR) {
570 vsi->old_ena_tc = vsi->all_enatc;
571 vsi->old_numtc = vsi->all_numtc;
572 } else {
573 ice_remove_q_channels(vsi, true);
574
575 /* for other reset type, do not support channel rebuild
576 * hence reset needed info
577 */
578 vsi->old_ena_tc = 0;
579 vsi->all_enatc = 0;
580 vsi->old_numtc = 0;
581 vsi->all_numtc = 0;
582 vsi->req_txq = 0;
583 vsi->req_rxq = 0;
584 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
585 memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
586 }
587 }
588skip:
589
462acf6a
TN
590 /* clear SW filtering DB */
591 ice_clear_hw_tbls(hw);
0b28b702 592 /* disable the VSIs and their queues that are not already DOWN */
7b9ffc76 593 ice_pf_dis_all_vsi(pf, false);
0b28b702 594
06c16d89 595 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
48096710 596 ice_ptp_prepare_for_reset(pf);
06c16d89 597
43113ff7
KK
598 if (ice_is_feature_supported(pf, ICE_F_GNSS))
599 ice_gnss_exit(pf);
600
c5a2a4a3
UK
601 if (hw->port_info)
602 ice_sched_clear_port(hw->port_info);
603
0b28b702 604 ice_shutdown_all_ctrlq(hw);
0f9d5027 605
7e408e07 606 set_bit(ICE_PREPARED_FOR_RESET, pf->state);
0b28b702
AV
607}
608
609/**
610 * ice_do_reset - Initiate one of many types of resets
611 * @pf: board private structure
fbc7b27a 612 * @reset_type: reset type requested before this function was called.
0b28b702
AV
613 */
614static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
615{
4015d11e 616 struct device *dev = ice_pf_to_dev(pf);
0b28b702
AV
617 struct ice_hw *hw = &pf->hw;
618
619 dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
0b28b702 620
fbc7b27a 621 ice_prepare_for_reset(pf, reset_type);
0b28b702
AV
622
623 /* trigger the reset */
624 if (ice_reset(hw, reset_type)) {
625 dev_err(dev, "reset %d failed\n", reset_type);
7e408e07
AV
626 set_bit(ICE_RESET_FAILED, pf->state);
627 clear_bit(ICE_RESET_OICR_RECV, pf->state);
628 clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
629 clear_bit(ICE_PFR_REQ, pf->state);
630 clear_bit(ICE_CORER_REQ, pf->state);
631 clear_bit(ICE_GLOBR_REQ, pf->state);
1c08052e 632 wake_up(&pf->reset_wait_queue);
0b28b702
AV
633 return;
634 }
635
0f9d5027
AV
636 /* PFR is a bit of a special case because it doesn't result in an OICR
637 * interrupt. So for PFR, rebuild after the reset and clear the reset-
638 * associated state bits.
639 */
0b28b702
AV
640 if (reset_type == ICE_RESET_PFR) {
641 pf->pfr_count++;
462acf6a 642 ice_rebuild(pf, reset_type);
7e408e07
AV
643 clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
644 clear_bit(ICE_PFR_REQ, pf->state);
1c08052e 645 wake_up(&pf->reset_wait_queue);
dac57288 646 ice_reset_all_vfs(pf);
0b28b702
AV
647 }
648}
649
650/**
651 * ice_reset_subtask - Set up for resetting the device and driver
652 * @pf: board private structure
653 */
654static void ice_reset_subtask(struct ice_pf *pf)
655{
0f9d5027 656 enum ice_reset_req reset_type = ICE_RESET_INVAL;
0b28b702
AV
657
658 /* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
0f9d5027
AV
659 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
660 * of reset is pending and sets bits in pf->state indicating the reset
7e408e07 661 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
0f9d5027
AV
662 * prepare for pending reset if not already (for PF software-initiated
663 * global resets the software should already be prepared for it as
7e408e07 664 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
0f9d5027
AV
665 * by firmware or software on other PFs, that bit is not set so prepare
666 * for the reset now), poll for reset done, rebuild and return.
0b28b702 667 */
7e408e07 668 if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
2ebd4428 669 /* Perform the largest reset requested */
7e408e07 670 if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
2ebd4428 671 reset_type = ICE_RESET_CORER;
7e408e07 672 if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
2ebd4428 673 reset_type = ICE_RESET_GLOBR;
7e408e07 674 if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
03af8406 675 reset_type = ICE_RESET_EMPR;
2ebd4428
DE
676 /* return if no valid reset type requested */
677 if (reset_type == ICE_RESET_INVAL)
678 return;
fbc7b27a 679 ice_prepare_for_reset(pf, reset_type);
0b28b702
AV
680
681 /* make sure we are ready to rebuild */
fd2a9817 682 if (ice_check_reset(&pf->hw)) {
7e408e07 683 set_bit(ICE_RESET_FAILED, pf->state);
fd2a9817
AV
684 } else {
685 /* done with reset. start rebuild */
686 pf->hw.reset_ongoing = false;
462acf6a 687 ice_rebuild(pf, reset_type);
0f9d5027 688 /* clear bit to resume normal operations, but
94c4441b 689 * ICE_NEEDS_RESTART bit is set in case rebuild failed
0f9d5027 690 */
7e408e07
AV
691 clear_bit(ICE_RESET_OICR_RECV, pf->state);
692 clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
693 clear_bit(ICE_PFR_REQ, pf->state);
694 clear_bit(ICE_CORER_REQ, pf->state);
695 clear_bit(ICE_GLOBR_REQ, pf->state);
1c08052e 696 wake_up(&pf->reset_wait_queue);
dac57288 697 ice_reset_all_vfs(pf);
fd2a9817 698 }
0f9d5027
AV
699
700 return;
0b28b702
AV
701 }
702
703 /* No pending resets to finish processing. Check for new resets */
7e408e07 704 if (test_bit(ICE_PFR_REQ, pf->state))
0f9d5027 705 reset_type = ICE_RESET_PFR;
7e408e07 706 if (test_bit(ICE_CORER_REQ, pf->state))
0f9d5027 707 reset_type = ICE_RESET_CORER;
7e408e07 708 if (test_bit(ICE_GLOBR_REQ, pf->state))
0b28b702 709 reset_type = ICE_RESET_GLOBR;
0f9d5027
AV
710 /* If no valid reset type requested just return */
711 if (reset_type == ICE_RESET_INVAL)
712 return;
0b28b702 713
0f9d5027 714 /* reset if not already down or busy */
7e408e07
AV
715 if (!test_bit(ICE_DOWN, pf->state) &&
716 !test_bit(ICE_CFG_BUSY, pf->state)) {
0b28b702
AV
717 ice_do_reset(pf, reset_type);
718 }
0b28b702
AV
719}
720
2e0ab37c
JB
721/**
722 * ice_print_topo_conflict - print topology conflict message
723 * @vsi: the VSI whose topology status is being checked
724 */
725static void ice_print_topo_conflict(struct ice_vsi *vsi)
726{
727 switch (vsi->port_info->phy.link_info.topo_media_conflict) {
728 case ICE_AQ_LINK_TOPO_CONFLICT:
729 case ICE_AQ_LINK_MEDIA_CONFLICT:
5878589d
PG
730 case ICE_AQ_LINK_TOPO_UNREACH_PRT:
731 case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
732 case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
5c57145a 733 netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
2e0ab37c 734 break;
5878589d 735 case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
4fc5fbee
AV
736 if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
737 netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
738 else
739 netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
5878589d 740 break;
2e0ab37c
JB
741 default:
742 break;
743 }
744}
745
cdedef59
AV
746/**
747 * ice_print_link_msg - print link up or down message
748 * @vsi: the VSI whose link status is being queried
749 * @isup: boolean for if the link is now up or down
750 */
fcea6f3d 751void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
cdedef59 752{
f776b3ac 753 struct ice_aqc_get_phy_caps_data *caps;
5ee30564 754 const char *an_advertised;
f776b3ac 755 const char *fec_req;
cdedef59 756 const char *speed;
f776b3ac 757 const char *fec;
cdedef59 758 const char *fc;
43260988 759 const char *an;
5518ac2a 760 int status;
cdedef59 761
c2a23e00
BC
762 if (!vsi)
763 return;
764
cdedef59
AV
765 if (vsi->current_isup == isup)
766 return;
767
768 vsi->current_isup = isup;
769
770 if (!isup) {
771 netdev_info(vsi->netdev, "NIC Link is Down\n");
772 return;
773 }
774
775 switch (vsi->port_info->phy.link_info.link_speed) {
072efdf8
AV
776 case ICE_AQ_LINK_SPEED_100GB:
777 speed = "100 G";
778 break;
779 case ICE_AQ_LINK_SPEED_50GB:
780 speed = "50 G";
781 break;
cdedef59
AV
782 case ICE_AQ_LINK_SPEED_40GB:
783 speed = "40 G";
784 break;
785 case ICE_AQ_LINK_SPEED_25GB:
786 speed = "25 G";
787 break;
788 case ICE_AQ_LINK_SPEED_20GB:
789 speed = "20 G";
790 break;
791 case ICE_AQ_LINK_SPEED_10GB:
792 speed = "10 G";
793 break;
794 case ICE_AQ_LINK_SPEED_5GB:
795 speed = "5 G";
796 break;
797 case ICE_AQ_LINK_SPEED_2500MB:
798 speed = "2.5 G";
799 break;
800 case ICE_AQ_LINK_SPEED_1000MB:
801 speed = "1 G";
802 break;
803 case ICE_AQ_LINK_SPEED_100MB:
804 speed = "100 M";
805 break;
806 default:
5b13886d 807 speed = "Unknown ";
cdedef59
AV
808 break;
809 }
810
811 switch (vsi->port_info->fc.current_mode) {
812 case ICE_FC_FULL:
2f2da36e 813 fc = "Rx/Tx";
cdedef59
AV
814 break;
815 case ICE_FC_TX_PAUSE:
2f2da36e 816 fc = "Tx";
cdedef59
AV
817 break;
818 case ICE_FC_RX_PAUSE:
2f2da36e 819 fc = "Rx";
cdedef59 820 break;
203a068a
BC
821 case ICE_FC_NONE:
822 fc = "None";
823 break;
cdedef59
AV
824 default:
825 fc = "Unknown";
826 break;
827 }
828
f776b3ac
PG
829 /* Get FEC mode based on negotiated link info */
830 switch (vsi->port_info->phy.link_info.fec_info) {
831 case ICE_AQ_LINK_25G_RS_528_FEC_EN:
f776b3ac
PG
832 case ICE_AQ_LINK_25G_RS_544_FEC_EN:
833 fec = "RS-FEC";
834 break;
835 case ICE_AQ_LINK_25G_KR_FEC_EN:
836 fec = "FC-FEC/BASE-R";
837 break;
838 default:
839 fec = "NONE";
840 break;
841 }
842
43260988
JB
843 /* check if autoneg completed, might be false due to not supported */
844 if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
845 an = "True";
846 else
847 an = "False";
848
f776b3ac 849 /* Get FEC mode requested based on PHY caps last SW configuration */
9efe35d0 850 caps = kzalloc(sizeof(*caps), GFP_KERNEL);
f776b3ac
PG
851 if (!caps) {
852 fec_req = "Unknown";
5ee30564 853 an_advertised = "Unknown";
f776b3ac
PG
854 goto done;
855 }
856
857 status = ice_aq_get_phy_caps(vsi->port_info, false,
d6730a87 858 ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
f776b3ac
PG
859 if (status)
860 netdev_info(vsi->netdev, "Get phy capability failed.\n");
861
5ee30564
PG
862 an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
863
f776b3ac
PG
864 if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
865 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
866 fec_req = "RS-FEC";
867 else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
868 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
869 fec_req = "FC-FEC/BASE-R";
870 else
871 fec_req = "NONE";
872
9efe35d0 873 kfree(caps);
f776b3ac
PG
874
875done:
5ee30564
PG
876 netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
877 speed, fec_req, fec, an_advertised, an, fc);
2e0ab37c 878 ice_print_topo_conflict(vsi);
cdedef59
AV
879}
880
0b28b702 881/**
f9867df6
AV
882 * ice_vsi_link_event - update the VSI's netdev
883 * @vsi: the VSI on which the link event occurred
884 * @link_up: whether or not the VSI needs to be set up or down
0b28b702
AV
885 */
886static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
887{
c2a23e00
BC
888 if (!vsi)
889 return;
890
e97fb1ae 891 if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
0b28b702
AV
892 return;
893
894 if (vsi->type == ICE_VSI_PF) {
c2a23e00 895 if (link_up == netif_carrier_ok(vsi->netdev))
0b28b702 896 return;
c2a23e00 897
0b28b702
AV
898 if (link_up) {
899 netif_carrier_on(vsi->netdev);
900 netif_tx_wake_all_queues(vsi->netdev);
901 } else {
902 netif_carrier_off(vsi->netdev);
903 netif_tx_stop_all_queues(vsi->netdev);
904 }
905 }
906}
907
7d9c9b79
DE
908/**
909 * ice_set_dflt_mib - send a default config MIB to the FW
910 * @pf: private PF struct
911 *
912 * This function sends a default configuration MIB to the FW.
913 *
914 * If this function errors out at any point, the driver is still able to
915 * function. The main impact is that LFC may not operate as expected.
916 * Therefore an error state in this function should be treated with a DBG
917 * message and continue on with driver rebuild/reenable.
918 */
919static void ice_set_dflt_mib(struct ice_pf *pf)
920{
921 struct device *dev = ice_pf_to_dev(pf);
922 u8 mib_type, *buf, *lldpmib = NULL;
923 u16 len, typelen, offset = 0;
924 struct ice_lldp_org_tlv *tlv;
12aae8f1 925 struct ice_hw *hw = &pf->hw;
7d9c9b79
DE
926 u32 ouisubtype;
927
7d9c9b79
DE
928 mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
929 lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
930 if (!lldpmib) {
931 dev_dbg(dev, "%s Failed to allocate MIB memory\n",
932 __func__);
933 return;
934 }
935
936 /* Add ETS CFG TLV */
937 tlv = (struct ice_lldp_org_tlv *)lldpmib;
938 typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
939 ICE_IEEE_ETS_TLV_LEN);
940 tlv->typelen = htons(typelen);
941 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
942 ICE_IEEE_SUBTYPE_ETS_CFG);
943 tlv->ouisubtype = htonl(ouisubtype);
944
945 buf = tlv->tlvinfo;
946 buf[0] = 0;
947
948 /* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
949 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
950 * Octets 13 - 20 are TSA values - leave as zeros
951 */
952 buf[5] = 0x64;
953 len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
954 offset += len + 2;
955 tlv = (struct ice_lldp_org_tlv *)
956 ((char *)tlv + sizeof(tlv->typelen) + len);
957
958 /* Add ETS REC TLV */
959 buf = tlv->tlvinfo;
960 tlv->typelen = htons(typelen);
961
962 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
963 ICE_IEEE_SUBTYPE_ETS_REC);
964 tlv->ouisubtype = htonl(ouisubtype);
965
966 /* First octet of buf is reserved
967 * Octets 1 - 4 map UP to TC - all UPs map to zero
968 * Octets 5 - 12 are BW values - set TC 0 to 100%.
969 * Octets 13 - 20 are TSA value - leave as zeros
970 */
971 buf[5] = 0x64;
972 offset += len + 2;
973 tlv = (struct ice_lldp_org_tlv *)
974 ((char *)tlv + sizeof(tlv->typelen) + len);
975
976 /* Add PFC CFG TLV */
977 typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
978 ICE_IEEE_PFC_TLV_LEN);
979 tlv->typelen = htons(typelen);
980
981 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
982 ICE_IEEE_SUBTYPE_PFC_CFG);
983 tlv->ouisubtype = htonl(ouisubtype);
984
985 /* Octet 1 left as all zeros - PFC disabled */
986 buf[0] = 0x08;
987 len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
988 offset += len + 2;
989
990 if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
991 dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
992
993 kfree(lldpmib);
994}
995
99d40752
BC
996/**
997 * ice_check_phy_fw_load - check if PHY FW load failed
998 * @pf: pointer to PF struct
999 * @link_cfg_err: bitmap from the link info structure
1000 *
1001 * check if external PHY FW load failed and print an error message if it did
1002 */
1003static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
1004{
1005 if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
1006 clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1007 return;
1008 }
1009
1010 if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
1011 return;
1012
1013 if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
1014 dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
1015 set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1016 }
1017}
1018
c77849f5
AV
1019/**
1020 * ice_check_module_power
1021 * @pf: pointer to PF struct
1022 * @link_cfg_err: bitmap from the link info structure
1023 *
1024 * check module power level returned by a previous call to aq_get_link_info
1025 * and print error messages if module power level is not supported
1026 */
1027static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
1028{
1029 /* if module power level is supported, clear the flag */
1030 if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
1031 ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
1032 clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1033 return;
1034 }
1035
1036 /* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
1037 * above block didn't clear this bit, there's nothing to do
1038 */
1039 if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
1040 return;
1041
1042 if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
1043 dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
1044 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1045 } else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
1046 dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
1047 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1048 }
1049}
1050
99d40752
BC
1051/**
1052 * ice_check_link_cfg_err - check if link configuration failed
1053 * @pf: pointer to the PF struct
1054 * @link_cfg_err: bitmap from the link info structure
1055 *
1056 * print if any link configuration failure happens due to the value in the
1057 * link_cfg_err parameter in the link info structure
1058 */
1059static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
1060{
1061 ice_check_module_power(pf, link_cfg_err);
1062 ice_check_phy_fw_load(pf, link_cfg_err);
1063}
1064
0b28b702
AV
1065/**
1066 * ice_link_event - process the link event
2f2da36e 1067 * @pf: PF that the link event is associated with
0b28b702 1068 * @pi: port_info for the port that the link event is associated with
c2a23e00
BC
1069 * @link_up: true if the physical link is up and false if it is down
1070 * @link_speed: current link speed received from the link event
0b28b702 1071 *
c2a23e00 1072 * Returns 0 on success and negative on failure
0b28b702
AV
1073 */
1074static int
c2a23e00
BC
1075ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
1076 u16 link_speed)
0b28b702 1077{
4015d11e 1078 struct device *dev = ice_pf_to_dev(pf);
0b28b702 1079 struct ice_phy_info *phy_info;
c2a23e00
BC
1080 struct ice_vsi *vsi;
1081 u16 old_link_speed;
1082 bool old_link;
5518ac2a 1083 int status;
0b28b702
AV
1084
1085 phy_info = &pi->phy;
1086 phy_info->link_info_old = phy_info->link_info;
0b28b702 1087
c2a23e00 1088 old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
0b28b702
AV
1089 old_link_speed = phy_info->link_info_old.link_speed;
1090
c2a23e00
BC
1091 /* update the link info structures and re-enable link events,
1092 * don't bail on failure due to other book keeping needed
1093 */
d348d517
AV
1094 status = ice_update_link_info(pi);
1095 if (status)
5f87ec48
TN
1096 dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
1097 pi->lport, status,
d348d517 1098 ice_aq_str(pi->hw->adminq.sq_last_status));
0b28b702 1099
99d40752 1100 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
c77849f5 1101
0ce6c34a
DE
1102 /* Check if the link state is up after updating link info, and treat
1103 * this event as an UP event since the link is actually UP now.
1104 */
1105 if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
1106 link_up = true;
1107
208ff751 1108 vsi = ice_get_main_vsi(pf);
c2a23e00
BC
1109 if (!vsi || !vsi->port_info)
1110 return -EINVAL;
0b28b702 1111
6d599946
TN
1112 /* turn off PHY if media was removed */
1113 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
1114 !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
1115 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
d348d517 1116 ice_set_link(vsi, false);
6d599946
TN
1117 }
1118
1a3571b5
PG
1119 /* if the old link up/down and speed is the same as the new */
1120 if (link_up == old_link && link_speed == old_link_speed)
d348d517 1121 return 0;
1a3571b5 1122
3a749623
JK
1123 if (!ice_is_e810(&pf->hw))
1124 ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
1125
7d9c9b79
DE
1126 if (ice_is_dcb_active(pf)) {
1127 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
1128 ice_dcb_rebuild(pf);
1129 } else {
1130 if (link_up)
1131 ice_set_dflt_mib(pf);
1132 }
c2a23e00
BC
1133 ice_vsi_link_event(vsi, link_up);
1134 ice_print_link_msg(vsi, link_up);
0b28b702 1135
26a91525 1136 ice_vc_notify_link_state(pf);
53b8decb 1137
d348d517 1138 return 0;
0b28b702
AV
1139}
1140
1141/**
4f4be03b
AV
1142 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
1143 * @pf: board private structure
0b28b702 1144 */
4f4be03b 1145static void ice_watchdog_subtask(struct ice_pf *pf)
0b28b702 1146{
4f4be03b 1147 int i;
0b28b702 1148
4f4be03b 1149 /* if interface is down do nothing */
7e408e07
AV
1150 if (test_bit(ICE_DOWN, pf->state) ||
1151 test_bit(ICE_CFG_BUSY, pf->state))
4f4be03b 1152 return;
0b28b702 1153
4f4be03b
AV
1154 /* make sure we don't do these things too often */
1155 if (time_before(jiffies,
1156 pf->serv_tmr_prev + pf->serv_tmr_period))
1157 return;
0b28b702 1158
4f4be03b
AV
1159 pf->serv_tmr_prev = jiffies;
1160
4f4be03b
AV
1161 /* Update the stats for active netdevs so the network stack
1162 * can look at updated numbers whenever it cares to
1163 */
1164 ice_update_pf_stats(pf);
80ed404a 1165 ice_for_each_vsi(pf, i)
4f4be03b
AV
1166 if (pf->vsi[i] && pf->vsi[i]->netdev)
1167 ice_update_vsi_stats(pf->vsi[i]);
0b28b702
AV
1168}
1169
250c3b3e
BC
1170/**
1171 * ice_init_link_events - enable/initialize link events
1172 * @pi: pointer to the port_info instance
1173 *
1174 * Returns -EIO on failure, 0 on success
1175 */
1176static int ice_init_link_events(struct ice_port_info *pi)
1177{
1178 u16 mask;
1179
1180 mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
99d40752
BC
1181 ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
1182 ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));
250c3b3e
BC
1183
1184 if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
19cce2c6 1185 dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
250c3b3e
BC
1186 pi->lport);
1187 return -EIO;
1188 }
1189
1190 if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
19cce2c6 1191 dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
250c3b3e
BC
1192 pi->lport);
1193 return -EIO;
1194 }
1195
1196 return 0;
1197}
1198
1199/**
1200 * ice_handle_link_event - handle link event via ARQ
2f2da36e 1201 * @pf: PF that the link event is associated with
c2a23e00 1202 * @event: event structure containing link status info
250c3b3e 1203 */
c2a23e00
BC
1204static int
1205ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
250c3b3e 1206{
c2a23e00 1207 struct ice_aqc_get_link_status_data *link_data;
250c3b3e
BC
1208 struct ice_port_info *port_info;
1209 int status;
1210
c2a23e00 1211 link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
250c3b3e
BC
1212 port_info = pf->hw.port_info;
1213 if (!port_info)
1214 return -EINVAL;
1215
c2a23e00
BC
1216 status = ice_link_event(pf, port_info,
1217 !!(link_data->link_info & ICE_AQ_LINK_UP),
1218 le16_to_cpu(link_data->link_speed));
250c3b3e 1219 if (status)
19cce2c6
AV
1220 dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1221 status);
250c3b3e
BC
1222
1223 return status;
1224}
1225
d69ea414
JK
1226enum ice_aq_task_state {
1227 ICE_AQ_TASK_WAITING = 0,
1228 ICE_AQ_TASK_COMPLETE,
1229 ICE_AQ_TASK_CANCELED,
1230};
1231
1232struct ice_aq_task {
1233 struct hlist_node entry;
1234
1235 u16 opcode;
1236 struct ice_rq_event_info *event;
1237 enum ice_aq_task_state state;
1238};
1239
1240/**
ef860480 1241 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
d69ea414
JK
1242 * @pf: pointer to the PF private structure
1243 * @opcode: the opcode to wait for
1244 * @timeout: how long to wait, in jiffies
1245 * @event: storage for the event info
1246 *
1247 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1248 * current thread will be put to sleep until the specified event occurs or
1249 * until the given timeout is reached.
1250 *
1251 * To obtain only the descriptor contents, pass an event without an allocated
1252 * msg_buf. If the complete data buffer is desired, allocate the
1253 * event->msg_buf with enough space ahead of time.
1254 *
1255 * Returns: zero on success, or a negative error code on failure.
1256 */
1257int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
1258 struct ice_rq_event_info *event)
1259{
1e8249cc 1260 struct device *dev = ice_pf_to_dev(pf);
d69ea414 1261 struct ice_aq_task *task;
1e8249cc 1262 unsigned long start;
d69ea414
JK
1263 long ret;
1264 int err;
1265
1266 task = kzalloc(sizeof(*task), GFP_KERNEL);
1267 if (!task)
1268 return -ENOMEM;
1269
1270 INIT_HLIST_NODE(&task->entry);
1271 task->opcode = opcode;
1272 task->event = event;
1273 task->state = ICE_AQ_TASK_WAITING;
1274
1275 spin_lock_bh(&pf->aq_wait_lock);
1276 hlist_add_head(&task->entry, &pf->aq_wait_list);
1277 spin_unlock_bh(&pf->aq_wait_lock);
1278
1e8249cc
JK
1279 start = jiffies;
1280
d69ea414
JK
1281 ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
1282 timeout);
1283 switch (task->state) {
1284 case ICE_AQ_TASK_WAITING:
1285 err = ret < 0 ? ret : -ETIMEDOUT;
1286 break;
1287 case ICE_AQ_TASK_CANCELED:
1288 err = ret < 0 ? ret : -ECANCELED;
1289 break;
1290 case ICE_AQ_TASK_COMPLETE:
1291 err = ret < 0 ? ret : 0;
1292 break;
1293 default:
1294 WARN(1, "Unexpected AdminQ wait task state %u", task->state);
1295 err = -EINVAL;
1296 break;
1297 }
1298
1e8249cc
JK
1299 dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
1300 jiffies_to_msecs(jiffies - start),
1301 jiffies_to_msecs(timeout),
1302 opcode);
1303
d69ea414
JK
1304 spin_lock_bh(&pf->aq_wait_lock);
1305 hlist_del(&task->entry);
1306 spin_unlock_bh(&pf->aq_wait_lock);
1307 kfree(task);
1308
1309 return err;
1310}
1311
1312/**
1313 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1314 * @pf: pointer to the PF private structure
1315 * @opcode: the opcode of the event
1316 * @event: the event to check
1317 *
1318 * Loops over the current list of pending threads waiting for an AdminQ event.
1319 * For each matching task, copy the contents of the event into the task
1320 * structure and wake up the thread.
1321 *
1322 * If multiple threads wait for the same opcode, they will all be woken up.
1323 *
1324 * Note that event->msg_buf will only be duplicated if the event has a buffer
1325 * with enough space already allocated. Otherwise, only the descriptor and
1326 * message length will be copied.
1327 *
1328 * Returns: true if an event was found, false otherwise
1329 */
1330static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1331 struct ice_rq_event_info *event)
1332{
1333 struct ice_aq_task *task;
1334 bool found = false;
1335
1336 spin_lock_bh(&pf->aq_wait_lock);
1337 hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1338 if (task->state || task->opcode != opcode)
1339 continue;
1340
1341 memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
1342 task->event->msg_len = event->msg_len;
1343
1344 /* Only copy the data buffer if a destination was set */
1345 if (task->event->msg_buf &&
1346 task->event->buf_len > event->buf_len) {
1347 memcpy(task->event->msg_buf, event->msg_buf,
1348 event->buf_len);
1349 task->event->buf_len = event->buf_len;
1350 }
1351
1352 task->state = ICE_AQ_TASK_COMPLETE;
1353 found = true;
1354 }
1355 spin_unlock_bh(&pf->aq_wait_lock);
1356
1357 if (found)
1358 wake_up(&pf->aq_wait_queue);
1359}
1360
1361/**
1362 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1363 * @pf: the PF private structure
1364 *
1365 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1366 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1367 */
1368static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1369{
1370 struct ice_aq_task *task;
1371
1372 spin_lock_bh(&pf->aq_wait_lock);
1373 hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1374 task->state = ICE_AQ_TASK_CANCELED;
1375 spin_unlock_bh(&pf->aq_wait_lock);
1376
1377 wake_up(&pf->aq_wait_queue);
1378}
1379
940b61af
AV
1380/**
1381 * __ice_clean_ctrlq - helper function to clean controlq rings
1382 * @pf: ptr to struct ice_pf
1383 * @q_type: specific Control queue type
1384 */
1385static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1386{
4015d11e 1387 struct device *dev = ice_pf_to_dev(pf);
940b61af
AV
1388 struct ice_rq_event_info event;
1389 struct ice_hw *hw = &pf->hw;
1390 struct ice_ctl_q_info *cq;
1391 u16 pending, i = 0;
1392 const char *qtype;
1393 u32 oldval, val;
1394
0b28b702 1395 /* Do not clean control queue if/when PF reset fails */
7e408e07 1396 if (test_bit(ICE_RESET_FAILED, pf->state))
0b28b702
AV
1397 return 0;
1398
940b61af
AV
1399 switch (q_type) {
1400 case ICE_CTL_Q_ADMIN:
1401 cq = &hw->adminq;
1402 qtype = "Admin";
1403 break;
8f5ee3c4
JK
1404 case ICE_CTL_Q_SB:
1405 cq = &hw->sbq;
1406 qtype = "Sideband";
1407 break;
75d2b253
AV
1408 case ICE_CTL_Q_MAILBOX:
1409 cq = &hw->mailboxq;
1410 qtype = "Mailbox";
0891c896
VS
1411 /* we are going to try to detect a malicious VF, so set the
1412 * state to begin detection
1413 */
1414 hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
75d2b253 1415 break;
940b61af 1416 default:
4015d11e 1417 dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
940b61af
AV
1418 return 0;
1419 }
1420
1421 /* check for error indications - PF_xx_AxQLEN register layout for
1422 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
1423 */
1424 val = rd32(hw, cq->rq.len);
1425 if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1426 PF_FW_ARQLEN_ARQCRIT_M)) {
1427 oldval = val;
1428 if (val & PF_FW_ARQLEN_ARQVFE_M)
4015d11e
BC
1429 dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1430 qtype);
940b61af 1431 if (val & PF_FW_ARQLEN_ARQOVFL_M) {
19cce2c6 1432 dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
940b61af
AV
1433 qtype);
1434 }
1435 if (val & PF_FW_ARQLEN_ARQCRIT_M)
19cce2c6 1436 dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
940b61af
AV
1437 qtype);
1438 val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1439 PF_FW_ARQLEN_ARQCRIT_M);
1440 if (oldval != val)
1441 wr32(hw, cq->rq.len, val);
1442 }
1443
1444 val = rd32(hw, cq->sq.len);
1445 if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1446 PF_FW_ATQLEN_ATQCRIT_M)) {
1447 oldval = val;
1448 if (val & PF_FW_ATQLEN_ATQVFE_M)
19cce2c6
AV
1449 dev_dbg(dev, "%s Send Queue VF Error detected\n",
1450 qtype);
940b61af 1451 if (val & PF_FW_ATQLEN_ATQOVFL_M) {
4015d11e 1452 dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
940b61af
AV
1453 qtype);
1454 }
1455 if (val & PF_FW_ATQLEN_ATQCRIT_M)
4015d11e 1456 dev_dbg(dev, "%s Send Queue Critical Error detected\n",
940b61af
AV
1457 qtype);
1458 val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1459 PF_FW_ATQLEN_ATQCRIT_M);
1460 if (oldval != val)
1461 wr32(hw, cq->sq.len, val);
1462 }
1463
1464 event.buf_len = cq->rq_buf_size;
9efe35d0 1465 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
940b61af
AV
1466 if (!event.msg_buf)
1467 return 0;
1468
1469 do {
0b28b702 1470 u16 opcode;
5518ac2a 1471 int ret;
940b61af
AV
1472
1473 ret = ice_clean_rq_elem(hw, cq, &event, &pending);
d54699e2 1474 if (ret == -EALREADY)
940b61af
AV
1475 break;
1476 if (ret) {
5f87ec48
TN
1477 dev_err(dev, "%s Receive Queue event error %d\n", qtype,
1478 ret);
940b61af
AV
1479 break;
1480 }
0b28b702
AV
1481
1482 opcode = le16_to_cpu(event.desc.opcode);
1483
d69ea414
JK
1484 /* Notify any thread that might be waiting for this event */
1485 ice_aq_check_events(pf, opcode, &event);
1486
0b28b702 1487 switch (opcode) {
250c3b3e 1488 case ice_aqc_opc_get_link_status:
c2a23e00 1489 if (ice_handle_link_event(pf, &event))
4015d11e 1490 dev_err(dev, "Could not handle link event\n");
250c3b3e 1491 break;
2309ae38
BC
1492 case ice_aqc_opc_event_lan_overflow:
1493 ice_vf_lan_overflow_event(pf, &event);
1494 break;
1071a835 1495 case ice_mbx_opc_send_msg_to_pf:
0891c896
VS
1496 if (!ice_is_malicious_vf(pf, &event, i, pending))
1497 ice_vc_process_vf_msg(pf, &event);
1071a835 1498 break;
8b97ceb1
HT
1499 case ice_aqc_opc_fw_logging:
1500 ice_output_fw_log(hw, &event.desc, event.msg_buf);
1501 break;
00cc3f1b
AV
1502 case ice_aqc_opc_lldp_set_mib_change:
1503 ice_dcb_process_lldp_set_mib_change(pf, &event);
1504 break;
0b28b702 1505 default:
19cce2c6 1506 dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
0b28b702
AV
1507 qtype, opcode);
1508 break;
1509 }
940b61af
AV
1510 } while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1511
9efe35d0 1512 kfree(event.msg_buf);
940b61af
AV
1513
1514 return pending && (i == ICE_DFLT_IRQ_WORK);
1515}
1516
3d6b640e
AV
1517/**
1518 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1519 * @hw: pointer to hardware info
1520 * @cq: control queue information
1521 *
1522 * returns true if there are pending messages in a queue, false if there aren't
1523 */
1524static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1525{
1526 u16 ntu;
1527
1528 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1529 return cq->rq.next_to_clean != ntu;
1530}
1531
940b61af
AV
1532/**
1533 * ice_clean_adminq_subtask - clean the AdminQ rings
1534 * @pf: board private structure
1535 */
1536static void ice_clean_adminq_subtask(struct ice_pf *pf)
1537{
1538 struct ice_hw *hw = &pf->hw;
940b61af 1539
7e408e07 1540 if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
940b61af
AV
1541 return;
1542
1543 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1544 return;
1545
7e408e07 1546 clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
940b61af 1547
3d6b640e
AV
1548 /* There might be a situation where new messages arrive to a control
1549 * queue between processing the last message and clearing the
1550 * EVENT_PENDING bit. So before exiting, check queue head again (using
1551 * ice_ctrlq_pending) and process new messages if any.
1552 */
1553 if (ice_ctrlq_pending(hw, &hw->adminq))
1554 __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
940b61af
AV
1555
1556 ice_flush(hw);
1557}
1558
75d2b253
AV
1559/**
1560 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1561 * @pf: board private structure
1562 */
1563static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1564{
1565 struct ice_hw *hw = &pf->hw;
1566
7e408e07 1567 if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
75d2b253
AV
1568 return;
1569
1570 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1571 return;
1572
7e408e07 1573 clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
75d2b253
AV
1574
1575 if (ice_ctrlq_pending(hw, &hw->mailboxq))
1576 __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1577
1578 ice_flush(hw);
1579}
1580
8f5ee3c4
JK
1581/**
1582 * ice_clean_sbq_subtask - clean the Sideband Queue rings
1583 * @pf: board private structure
1584 */
1585static void ice_clean_sbq_subtask(struct ice_pf *pf)
1586{
1587 struct ice_hw *hw = &pf->hw;
1588
1589 /* Nothing to do here if sideband queue is not supported */
1590 if (!ice_is_sbq_supported(hw)) {
1591 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1592 return;
1593 }
1594
1595 if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
1596 return;
1597
1598 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
1599 return;
1600
1601 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1602
1603 if (ice_ctrlq_pending(hw, &hw->sbq))
1604 __ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
1605
1606 ice_flush(hw);
1607}
1608
940b61af
AV
1609/**
1610 * ice_service_task_schedule - schedule the service task to wake up
1611 * @pf: board private structure
1612 *
1613 * If not already scheduled, this puts the task into the work queue.
1614 */
28bf2672 1615void ice_service_task_schedule(struct ice_pf *pf)
940b61af 1616{
7e408e07
AV
1617 if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1618 !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1619 !test_bit(ICE_NEEDS_RESTART, pf->state))
940b61af
AV
1620 queue_work(ice_wq, &pf->serv_task);
1621}
1622
1623/**
1624 * ice_service_task_complete - finish up the service task
1625 * @pf: board private structure
1626 */
1627static void ice_service_task_complete(struct ice_pf *pf)
1628{
7e408e07 1629 WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
940b61af
AV
1630
1631 /* force memory (pf->state) to sync before next service task */
1632 smp_mb__before_atomic();
7e408e07 1633 clear_bit(ICE_SERVICE_SCHED, pf->state);
940b61af
AV
1634}
1635
8d81fa55
AA
1636/**
1637 * ice_service_task_stop - stop service task and cancel works
1638 * @pf: board private structure
769c500d 1639 *
7e408e07 1640 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
769c500d 1641 * 1 otherwise.
8d81fa55 1642 */
769c500d 1643static int ice_service_task_stop(struct ice_pf *pf)
8d81fa55 1644{
769c500d
AA
1645 int ret;
1646
7e408e07 1647 ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
8d81fa55
AA
1648
1649 if (pf->serv_tmr.function)
1650 del_timer_sync(&pf->serv_tmr);
1651 if (pf->serv_task.func)
1652 cancel_work_sync(&pf->serv_task);
1653
7e408e07 1654 clear_bit(ICE_SERVICE_SCHED, pf->state);
769c500d 1655 return ret;
8d81fa55
AA
1656}
1657
5995b6d0
BC
1658/**
1659 * ice_service_task_restart - restart service task and schedule works
1660 * @pf: board private structure
1661 *
1662 * This function is needed for suspend and resume works (e.g WoL scenario)
1663 */
1664static void ice_service_task_restart(struct ice_pf *pf)
1665{
7e408e07 1666 clear_bit(ICE_SERVICE_DIS, pf->state);
5995b6d0
BC
1667 ice_service_task_schedule(pf);
1668}
1669
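/* Editor's sketch (not part of the driver): ice_service_task_stop() and
 * ice_service_task_restart() are meant to be paired around suspend-style
 * flows. A hypothetical caller could look like:
 *
 *	int already_disabled = ice_service_task_stop(pf);
 *
 *	... quiesce the device ...
 *
 *	if (!already_disabled)
 *		ice_service_task_restart(pf);
 */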
940b61af
AV
1670/**
1671 * ice_service_timer - timer callback to schedule service task
1672 * @t: pointer to timer_list
1673 */
1674static void ice_service_timer(struct timer_list *t)
1675{
1676 struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1677
1678 mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1679 ice_service_task_schedule(pf);
1680}
1681
b3969fd7
SM
1682/**
1683 * ice_handle_mdd_event - handle malicious driver detect event
1684 * @pf: pointer to the PF structure
1685 *
9d5c5a52
PG
1686 * Called from service task. OICR interrupt handler indicates MDD event.
1687 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1688 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1689 * disable the queue, the PF can be configured to reset the VF using ethtool
1690 * private flag mdd-auto-reset-vf.
b3969fd7
SM
1691 */
1692static void ice_handle_mdd_event(struct ice_pf *pf)
1693{
4015d11e 1694 struct device *dev = ice_pf_to_dev(pf);
b3969fd7 1695 struct ice_hw *hw = &pf->hw;
c4c2c7db
JK
1696 struct ice_vf *vf;
1697 unsigned int bkt;
b3969fd7
SM
1698 u32 reg;
1699
7e408e07 1700 if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
9d5c5a52
PG
1701 /* Since the VF MDD event logging is rate limited, check if
1702 * there are pending MDD events.
1703 */
1704 ice_print_vfs_mdd_events(pf);
b3969fd7 1705 return;
9d5c5a52 1706 }
b3969fd7 1707
9d5c5a52 1708 /* find what triggered an MDD event */
b3969fd7
SM
1709 reg = rd32(hw, GL_MDET_TX_PQM);
1710 if (reg & GL_MDET_TX_PQM_VALID_M) {
1711 u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1712 GL_MDET_TX_PQM_PF_NUM_S;
1713 u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
1714 GL_MDET_TX_PQM_VF_NUM_S;
1715 u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1716 GL_MDET_TX_PQM_MAL_TYPE_S;
1717 u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
1718 GL_MDET_TX_PQM_QNUM_S);
1719
1720 if (netif_msg_tx_err(pf))
4015d11e 1721 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
b3969fd7
SM
1722 event, queue, pf_num, vf_num);
1723 wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
b3969fd7
SM
1724 }
1725
1726 reg = rd32(hw, GL_MDET_TX_TCLAN);
1727 if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1728 u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1729 GL_MDET_TX_TCLAN_PF_NUM_S;
1730 u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1731 GL_MDET_TX_TCLAN_VF_NUM_S;
1732 u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1733 GL_MDET_TX_TCLAN_MAL_TYPE_S;
1734 u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1735 GL_MDET_TX_TCLAN_QNUM_S);
1736
1d8bd992 1737 if (netif_msg_tx_err(pf))
4015d11e 1738 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
b3969fd7
SM
1739 event, queue, pf_num, vf_num);
1740 wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
b3969fd7
SM
1741 }
1742
1743 reg = rd32(hw, GL_MDET_RX);
1744 if (reg & GL_MDET_RX_VALID_M) {
1745 u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1746 GL_MDET_RX_PF_NUM_S;
1747 u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1748 GL_MDET_RX_VF_NUM_S;
1749 u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1750 GL_MDET_RX_MAL_TYPE_S;
1751 u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1752 GL_MDET_RX_QNUM_S);
1753
1754 if (netif_msg_rx_err(pf))
4015d11e 1755 dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
b3969fd7
SM
1756 event, queue, pf_num, vf_num);
1757 wr32(hw, GL_MDET_RX, 0xffffffff);
b3969fd7
SM
1758 }
1759
9d5c5a52
PG
1760 /* check to see if this PF caused an MDD event */
1761 reg = rd32(hw, PF_MDET_TX_PQM);
1762 if (reg & PF_MDET_TX_PQM_VALID_M) {
1763 wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1764 if (netif_msg_tx_err(pf))
1765 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1766 }
b3969fd7 1767
9d5c5a52
PG
1768 reg = rd32(hw, PF_MDET_TX_TCLAN);
1769 if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1770 wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
1771 if (netif_msg_tx_err(pf))
1772 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1773 }
b3969fd7 1774
9d5c5a52
PG
1775 reg = rd32(hw, PF_MDET_RX);
1776 if (reg & PF_MDET_RX_VALID_M) {
1777 wr32(hw, PF_MDET_RX, 0xFFFF);
1778 if (netif_msg_rx_err(pf))
1779 dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
b3969fd7
SM
1780 }
1781
9d5c5a52
PG
1782 /* Check to see if one of the VFs caused an MDD event, and then
1783 * increment counters and set print pending
1784 */
3d5985a1 1785 mutex_lock(&pf->vfs.table_lock);
c4c2c7db
JK
1786 ice_for_each_vf(pf, bkt, vf) {
1787 reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
7c4bc1f5 1788 if (reg & VP_MDET_TX_PQM_VALID_M) {
c4c2c7db 1789 wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
9d5c5a52 1790 vf->mdd_tx_events.count++;
7e408e07 1791 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
9d5c5a52
PG
1792 if (netif_msg_tx_err(pf))
1793 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
c4c2c7db 1794 vf->vf_id);
7c4bc1f5
AV
1795 }
1796
c4c2c7db 1797 reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
7c4bc1f5 1798 if (reg & VP_MDET_TX_TCLAN_VALID_M) {
c4c2c7db 1799 wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
9d5c5a52 1800 vf->mdd_tx_events.count++;
7e408e07 1801 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
9d5c5a52
PG
1802 if (netif_msg_tx_err(pf))
1803 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
c4c2c7db 1804 vf->vf_id);
7c4bc1f5
AV
1805 }
1806
c4c2c7db 1807 reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
7c4bc1f5 1808 if (reg & VP_MDET_TX_TDPU_VALID_M) {
c4c2c7db 1809 wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
9d5c5a52 1810 vf->mdd_tx_events.count++;
7e408e07 1811 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
9d5c5a52
PG
1812 if (netif_msg_tx_err(pf))
1813 dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
c4c2c7db 1814 vf->vf_id);
7c4bc1f5
AV
1815 }
1816
c4c2c7db 1817 reg = rd32(hw, VP_MDET_RX(vf->vf_id));
7c4bc1f5 1818 if (reg & VP_MDET_RX_VALID_M) {
c4c2c7db 1819 wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
9d5c5a52 1820 vf->mdd_rx_events.count++;
7e408e07 1821 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
9d5c5a52
PG
1822 if (netif_msg_rx_err(pf))
1823 dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
c4c2c7db 1824 vf->vf_id);
9d5c5a52
PG
1825
1826 /* Since the queue is disabled on VF Rx MDD events, the
1827 * PF can be configured to reset the VF through ethtool
1828 * private flag mdd-auto-reset-vf.
1829 */
7438a3b0
PG
1830 if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1831 /* VF MDD event counters will be cleared by
1832 * reset, so print the event prior to reset.
1833 */
1834 ice_print_vf_rx_mdd_event(vf);
f5f085c0 1835 ice_reset_vf(vf, ICE_VF_RESET_LOCK);
7438a3b0 1836 }
7c4bc1f5
AV
1837 }
1838 }
3d5985a1 1839 mutex_unlock(&pf->vfs.table_lock);
9d5c5a52
PG
1840
1841 ice_print_vfs_mdd_events(pf);
b3969fd7
SM
1842}
1843
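/* Editor's note: every MDD cause register above is decoded with the same
 * mask-then-shift pattern. A generic helper (an illustrative sketch only,
 * not an existing driver API) would be:
 *
 *	static inline u32 ice_mdd_field(u32 reg, u32 mask, u8 shift)
 *	{
 *		return (reg & mask) >> shift;
 *	}
 *
 *	event = ice_mdd_field(reg, GL_MDET_TX_PQM_MAL_TYPE_M,
 *			      GL_MDET_TX_PQM_MAL_TYPE_S);
 */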
6d599946
TN
1844/**
1845 * ice_force_phys_link_state - Force the physical link state
1846 * @vsi: VSI to force the physical link state to up/down
1847 * @link_up: true/false indicates to set the physical link to up/down
1848 *
1849 * Force the physical link state by getting the current PHY capabilities from
1850 * hardware and setting the PHY config based on the determined capabilities. If
1851 * the link changes, a link event will be triggered because both the Enable Automatic
1852 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1853 *
1854 * Returns 0 on success, negative on failure
1855 */
1856static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1857{
1858 struct ice_aqc_get_phy_caps_data *pcaps;
1859 struct ice_aqc_set_phy_cfg_data *cfg;
1860 struct ice_port_info *pi;
1861 struct device *dev;
1862 int retcode;
1863
1864 if (!vsi || !vsi->port_info || !vsi->back)
1865 return -EINVAL;
1866 if (vsi->type != ICE_VSI_PF)
1867 return 0;
1868
9a946843 1869 dev = ice_pf_to_dev(vsi->back);
6d599946
TN
1870
1871 pi = vsi->port_info;
1872
9efe35d0 1873 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
6d599946
TN
1874 if (!pcaps)
1875 return -ENOMEM;
1876
d6730a87 1877 retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
6d599946
TN
1878 NULL);
1879 if (retcode) {
19cce2c6 1880 dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
6d599946
TN
1881 vsi->vsi_num, retcode);
1882 retcode = -EIO;
1883 goto out;
1884 }
1885
1886 /* No change in link */
1887 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1888 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1889 goto out;
1890
1a3571b5
PG
1891 /* Use the current user PHY configuration. The current user PHY
1892 * configuration is initialized during probe from PHY capabilities
1893 * software mode, and updated on set PHY configuration.
1894 */
1895 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
6d599946
TN
1896 if (!cfg) {
1897 retcode = -ENOMEM;
1898 goto out;
1899 }
1900
1a3571b5 1901 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
6d599946
TN
1902 if (link_up)
1903 cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1904 else
1905 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1906
1a3571b5 1907 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
6d599946
TN
1908 if (retcode) {
1909 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1910 vsi->vsi_num, retcode);
1911 retcode = -EIO;
1912 }
1913
9efe35d0 1914 kfree(cfg);
6d599946 1915out:
9efe35d0 1916 kfree(pcaps);
6d599946
TN
1917 return retcode;
1918}
1919
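/* Editor's sketch: a hypothetical caller honouring link-down-on-close would
 * request the opposite state when the interface goes down, e.g.:
 *
 *	err = ice_force_phys_link_state(vsi, false);
 *	if (err)
 *		netdev_err(vsi->netdev, "Failed to force link down, error %d\n",
 *			   err);
 */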
1920/**
1a3571b5
PG
1921 * ice_init_nvm_phy_type - Initialize the NVM PHY type
1922 * @pi: port info structure
1923 *
ea78ce4d 1924 * Initialize nvm_phy_type_[low|high] for link lenient mode support
1a3571b5
PG
1925 */
1926static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1927{
1928 struct ice_aqc_get_phy_caps_data *pcaps;
1929 struct ice_pf *pf = pi->hw->back;
2ccc1c1c 1930 int err;
1a3571b5
PG
1931
1932 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1933 if (!pcaps)
1934 return -ENOMEM;
1935
2ccc1c1c
TN
1936 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
1937 pcaps, NULL);
1a3571b5 1938
2ccc1c1c 1939 if (err) {
1a3571b5 1940 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1a3571b5
PG
1941 goto out;
1942 }
1943
1944 pf->nvm_phy_type_hi = pcaps->phy_type_high;
1945 pf->nvm_phy_type_lo = pcaps->phy_type_low;
1946
1947out:
1948 kfree(pcaps);
1949 return err;
1950}
1951
ea78ce4d
PG
1952/**
1953 * ice_init_link_dflt_override - Initialize link default override
1954 * @pi: port info structure
b4e813dd
BA
1955 *
1956 * Initialize link default override and PHY total port shutdown during probe
ea78ce4d
PG
1957 */
1958static void ice_init_link_dflt_override(struct ice_port_info *pi)
1959{
1960 struct ice_link_default_override_tlv *ldo;
1961 struct ice_pf *pf = pi->hw->back;
1962
1963 ldo = &pf->link_dflt_override;
b4e813dd
BA
1964 if (ice_get_link_default_override(ldo, pi))
1965 return;
1966
1967 if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
1968 return;
1969
1970 /* Enable Total Port Shutdown (override/replace link-down-on-close
1971 * ethtool private flag) for ports with Port Disable bit set.
1972 */
1973 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
1974 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
ea78ce4d
PG
1975}
1976
1977/**
1978 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
1979 * @pi: port info structure
1980 *
0a02944f 1981 * If default override is enabled, initialize the user PHY cfg speed and FEC
ea78ce4d
PG
1982 * settings using the default override mask from the NVM.
1983 *
1984 * The PHY should only be configured with the default override settings the
7e408e07 1985 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
ea78ce4d
PG
1986 * is used to indicate that the user PHY cfg default override is initialized
1987 * and the PHY has not been configured with the default override settings. The
1988 * state is set here, and cleared in ice_configure_phy the first time the PHY is
1989 * configured.
0a02944f
AV
1990 *
1991 * This function should be called only if the FW doesn't support default
1992 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
ea78ce4d
PG
1993 */
1994static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
1995{
1996 struct ice_link_default_override_tlv *ldo;
1997 struct ice_aqc_set_phy_cfg_data *cfg;
1998 struct ice_phy_info *phy = &pi->phy;
1999 struct ice_pf *pf = pi->hw->back;
2000
2001 ldo = &pf->link_dflt_override;
2002
2003 /* If link default override is enabled, use it to mask NVM PHY capabilities
2004 * for speed and FEC default configuration.
2005 */
2006 cfg = &phy->curr_user_phy_cfg;
2007
2008 if (ldo->phy_type_low || ldo->phy_type_high) {
2009 cfg->phy_type_low = pf->nvm_phy_type_lo &
2010 cpu_to_le64(ldo->phy_type_low);
2011 cfg->phy_type_high = pf->nvm_phy_type_hi &
2012 cpu_to_le64(ldo->phy_type_high);
2013 }
2014 cfg->link_fec_opt = ldo->fec_options;
2015 phy->curr_user_fec_req = ICE_FEC_AUTO;
2016
7e408e07 2017 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
ea78ce4d
PG
2018}
2019
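/* Editor's worked example (illustrative values): if the NVM advertises
 * phy_type_low = 0xF0 and the override TLV allows 0x30, the resulting
 * cfg->phy_type_low is 0xF0 & 0x30 = 0x30, i.e. only the overridden subset
 * of speeds is kept as the user default.
 */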
1a3571b5
PG
2020/**
2021 * ice_init_phy_user_cfg - Initialize the PHY user configuration
2022 * @pi: port info structure
2023 *
2024 * Initialize the current user PHY configuration, speed, FEC, and FC requested
2025 * mode to default. The PHY defaults are from get PHY capabilities topology
2026 * with media, so call when media is first available. An error is returned if
2027 * called when media is not available. The PHY initialization completed state is
2028 * set here.
2029 *
2030 * These configurations are used when setting PHY
2031 * configuration. The user PHY configuration is updated on set PHY
2032 * configuration. Returns 0 on success, negative on failure
2033 */
2034static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2035{
2036 struct ice_aqc_get_phy_caps_data *pcaps;
2037 struct ice_phy_info *phy = &pi->phy;
2038 struct ice_pf *pf = pi->hw->back;
2ccc1c1c 2039 int err;
1a3571b5
PG
2040
2041 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2042 return -EIO;
2043
1a3571b5
PG
2044 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2045 if (!pcaps)
2046 return -ENOMEM;
2047
0a02944f 2048 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2ccc1c1c
TN
2049 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2050 pcaps, NULL);
0a02944f 2051 else
2ccc1c1c
TN
2052 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2053 pcaps, NULL);
2054 if (err) {
1a3571b5 2055 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1a3571b5
PG
2056 goto err_out;
2057 }
2058
ea78ce4d
PG
2059 ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2060
2061 /* check if lenient mode is supported and enabled */
dc6aaa13 2062 if (ice_fw_supports_link_override(pi->hw) &&
ea78ce4d
PG
2063 !(pcaps->module_compliance_enforcement &
2064 ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2065 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2066
0a02944f
AV
2067 /* if the FW supports default PHY configuration mode, then the driver
2068 * does not have to apply link override settings. If not,
2069 * initialize user PHY configuration with link override values
ea78ce4d 2070 */
0a02944f
AV
2071 if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2072 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
ea78ce4d
PG
2073 ice_init_phy_cfg_dflt_override(pi);
2074 goto out;
2075 }
2076 }
2077
0a02944f
AV
2078 /* if link default override is not enabled, set user flow control and
2079 * FEC settings based on what get_phy_caps returned
ea78ce4d 2080 */
1a3571b5
PG
2081 phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2082 pcaps->link_fec_options);
2083 phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2084
ea78ce4d 2085out:
1a3571b5 2086 phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
7e408e07 2087 set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
1a3571b5
PG
2088err_out:
2089 kfree(pcaps);
2090 return err;
2091}
2092
2093/**
2094 * ice_configure_phy - configure PHY
2095 * @vsi: VSI of PHY
2096 *
2097 * Set the PHY configuration. If the current PHY configuration is the same as
2098 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
2099 * configure the PHY based on the get PHY capabilities for topology with media.
2100 */
2101static int ice_configure_phy(struct ice_vsi *vsi)
2102{
2103 struct device *dev = ice_pf_to_dev(vsi->back);
efc1eddb 2104 struct ice_port_info *pi = vsi->port_info;
1a3571b5
PG
2105 struct ice_aqc_get_phy_caps_data *pcaps;
2106 struct ice_aqc_set_phy_cfg_data *cfg;
efc1eddb
AV
2107 struct ice_phy_info *phy = &pi->phy;
2108 struct ice_pf *pf = vsi->back;
2ccc1c1c 2109 int err;
1a3571b5 2110
1a3571b5 2111 /* Ensure we have media as we cannot configure a medialess port */
efc1eddb 2112 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1a3571b5
PG
2113 return -EPERM;
2114
2115 ice_print_topo_conflict(vsi);
2116
4fc5fbee
AV
2117 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2118 phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
1a3571b5
PG
2119 return -EPERM;
2120
efc1eddb 2121 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
1a3571b5
PG
2122 return ice_force_phys_link_state(vsi, true);
2123
2124 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2125 if (!pcaps)
2126 return -ENOMEM;
2127
2128 /* Get current PHY config */
2ccc1c1c
TN
2129 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2130 NULL);
2131 if (err) {
5f87ec48 2132 dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2ccc1c1c 2133 vsi->vsi_num, err);
1a3571b5
PG
2134 goto done;
2135 }
2136
2137 /* If PHY enable link is configured and configuration has not changed,
2138 * there's nothing to do
2139 */
2140 if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
efc1eddb 2141 ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
1a3571b5
PG
2142 goto done;
2143
2144 /* Use PHY topology as baseline for configuration */
2145 memset(pcaps, 0, sizeof(*pcaps));
0a02944f 2146 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2ccc1c1c
TN
2147 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2148 pcaps, NULL);
0a02944f 2149 else
2ccc1c1c
TN
2150 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2151 pcaps, NULL);
2152 if (err) {
5f87ec48 2153 dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2ccc1c1c 2154 vsi->vsi_num, err);
1a3571b5
PG
2155 goto done;
2156 }
2157
2158 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2159 if (!cfg) {
2160 err = -ENOMEM;
2161 goto done;
2162 }
2163
ea78ce4d 2164 ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
1a3571b5
PG
2165
2166 /* Speed - If default override pending, use curr_user_phy_cfg set in
2167 * ice_init_phy_cfg_dflt_override.
2168 */
7e408e07 2169 if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
ea78ce4d 2170 vsi->back->state)) {
efc1eddb
AV
2171 cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2172 cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
ea78ce4d
PG
2173 } else {
2174 u64 phy_low = 0, phy_high = 0;
2175
2176 ice_update_phy_type(&phy_low, &phy_high,
2177 pi->phy.curr_user_speed_req);
2178 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2179 cfg->phy_type_high = pcaps->phy_type_high &
2180 cpu_to_le64(phy_high);
2181 }
1a3571b5
PG
2182
2183 /* Can't provide what was requested; use PHY capabilities */
2184 if (!cfg->phy_type_low && !cfg->phy_type_high) {
2185 cfg->phy_type_low = pcaps->phy_type_low;
2186 cfg->phy_type_high = pcaps->phy_type_high;
2187 }
2188
2189 /* FEC */
efc1eddb 2190 ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
1a3571b5
PG
2191
2192 /* Can't provide what was requested; use PHY capabilities */
2193 if (cfg->link_fec_opt !=
2194 (cfg->link_fec_opt & pcaps->link_fec_options)) {
2195 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2196 cfg->link_fec_opt = pcaps->link_fec_options;
2197 }
2198
2199 /* Flow Control - always supported; no need to check against
2200 * capabilities
2201 */
efc1eddb 2202 ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
1a3571b5
PG
2203
2204 /* Enable link and link update */
2205 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2206
2ccc1c1c 2207 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
c1484691 2208 if (err)
5f87ec48 2209 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2ccc1c1c 2210 vsi->vsi_num, err);
1a3571b5
PG
2211
2212 kfree(cfg);
2213done:
2214 kfree(pcaps);
2215 return err;
2216}
2217
2218/**
2219 * ice_check_media_subtask - Check for media
6d599946 2220 * @pf: pointer to PF struct
1a3571b5
PG
2221 *
2222 * If media is available, then initialize the PHY user configuration if it has
2223 * not been done yet, and configure the PHY if the interface is up.
6d599946
TN
2224 */
2225static void ice_check_media_subtask(struct ice_pf *pf)
2226{
2227 struct ice_port_info *pi;
2228 struct ice_vsi *vsi;
2229 int err;
2230
1a3571b5
PG
2231 /* No need to check for media if it's already present */
2232 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
6d599946
TN
2233 return;
2234
1a3571b5
PG
2235 vsi = ice_get_main_vsi(pf);
2236 if (!vsi)
6d599946
TN
2237 return;
2238
2239 /* Refresh link info and check if media is present */
2240 pi = vsi->port_info;
2241 err = ice_update_link_info(pi);
2242 if (err)
2243 return;
2244
99d40752 2245 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
c77849f5 2246
6d599946 2247 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
7e408e07 2248 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
1a3571b5
PG
2249 ice_init_phy_user_cfg(pi);
2250
2251 /* PHY settings are reset on media insertion, reconfigure
2252 * PHY to preserve settings.
2253 */
e97fb1ae 2254 if (test_bit(ICE_VSI_DOWN, vsi->state) &&
1a3571b5 2255 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
6d599946 2256 return;
1a3571b5
PG
2257
2258 err = ice_configure_phy(vsi);
2259 if (!err)
2260 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
6d599946
TN
2261
2262 /* A Link Status Event will be generated; the event handler
2263 * will complete bringing the interface up
2264 */
2265 }
2266}
2267
940b61af
AV
2268/**
2269 * ice_service_task - manage and run subtasks
2270 * @work: pointer to work_struct contained by the PF struct
2271 */
2272static void ice_service_task(struct work_struct *work)
2273{
2274 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2275 unsigned long start_time = jiffies;
2276
2277 /* subtasks */
0b28b702
AV
2278
2279 /* process reset requests first */
2280 ice_reset_subtask(pf);
2281
0f9d5027 2282 /* bail if a reset/recovery cycle is pending or rebuild failed */
5df7e45d 2283 if (ice_is_reset_in_progress(pf->state) ||
7e408e07
AV
2284 test_bit(ICE_SUSPENDED, pf->state) ||
2285 test_bit(ICE_NEEDS_RESTART, pf->state)) {
0b28b702
AV
2286 ice_service_task_complete(pf);
2287 return;
2288 }
2289
32d53c0a
AL
2290 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2291 struct iidc_event *event;
2292
2293 event = kzalloc(sizeof(*event), GFP_KERNEL);
2294 if (event) {
2295 set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2296 /* report the entire OICR value to AUX driver */
2297 swap(event->reg, pf->oicr_err_reg);
2298 ice_send_event_to_aux(pf, event);
2299 kfree(event);
2300 }
2301 }
2302
5cb1ebdb
IV
2303 if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) {
2304 /* Plug aux device per request */
5dbbbd01
DE
2305 ice_plug_aux_dev(pf);
2306
5cb1ebdb
IV
2307 /* Mark plugging as done but check whether unplug was
2308 * requested during ice_plug_aux_dev() call
2309 * (e.g. from ice_clear_rdma_cap()) and if so then
2310 * plug aux device.
2311 */
2312 if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2313 ice_unplug_aux_dev(pf);
2314 }
2315
97b01291
DE
2316 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2317 struct iidc_event *event;
2318
2319 event = kzalloc(sizeof(*event), GFP_KERNEL);
2320 if (event) {
2321 set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
2322 ice_send_event_to_aux(pf, event);
2323 kfree(event);
2324 }
2325 }
2326
462acf6a 2327 ice_clean_adminq_subtask(pf);
6d599946 2328 ice_check_media_subtask(pf);
b3969fd7 2329 ice_check_for_hang_subtask(pf);
e94d4478 2330 ice_sync_fltr_subtask(pf);
b3969fd7 2331 ice_handle_mdd_event(pf);
fcea6f3d 2332 ice_watchdog_subtask(pf);
462acf6a
TN
2333
2334 if (ice_is_safe_mode(pf)) {
2335 ice_service_task_complete(pf);
2336 return;
2337 }
2338
2339 ice_process_vflr_event(pf);
75d2b253 2340 ice_clean_mailboxq_subtask(pf);
8f5ee3c4 2341 ice_clean_sbq_subtask(pf);
28bf2672 2342 ice_sync_arfs_fltrs(pf);
d6218317 2343 ice_flush_fdir_ctx(pf);
7e408e07
AV
2344
2345 /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
940b61af
AV
2346 ice_service_task_complete(pf);
2347
2348 /* If the tasks have taken longer than one service timer period
2349 * or there is more work to be done, reset the service timer to
2350 * schedule the service task now.
2351 */
2352 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
7e408e07
AV
2353 test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2354 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2355 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2356 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
8f5ee3c4 2357 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
7e408e07 2358 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
940b61af
AV
2359 mod_timer(&pf->serv_tmr, jiffies);
2360}
2361
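/* Editor's sketch of the producer/consumer pattern the service task relies
 * on (ICE_FOO_EVENT_PENDING and ice_handle_foo() are hypothetical names used
 * only for illustration). A producer, such as an interrupt handler, does:
 *
 *	set_bit(ICE_FOO_EVENT_PENDING, pf->state);
 *	ice_service_task_schedule(pf);
 *
 * and the matching consumer in ice_service_task() does:
 *
 *	if (test_and_clear_bit(ICE_FOO_EVENT_PENDING, pf->state))
 *		ice_handle_foo(pf);
 *
 * Bits still set when the task completes re-arm the service timer right away
 * (the time_after()/test_bit() check above).
 */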
f31e4b6f
AV
2362/**
2363 * ice_set_ctrlq_len - helper function to set controlq length
f9867df6 2364 * @hw: pointer to the HW instance
f31e4b6f
AV
2365 */
2366static void ice_set_ctrlq_len(struct ice_hw *hw)
2367{
2368 hw->adminq.num_rq_entries = ICE_AQ_LEN;
2369 hw->adminq.num_sq_entries = ICE_AQ_LEN;
2370 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2371 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
c8a1071d 2372 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
11836214 2373 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
75d2b253
AV
2374 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2375 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
8f5ee3c4
JK
2376 hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2377 hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2378 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2379 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
f31e4b6f
AV
2380}
2381
87324e74
HT
2382/**
2383 * ice_schedule_reset - schedule a reset
2384 * @pf: board private structure
2385 * @reset: reset being requested
2386 */
2387int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2388{
2389 struct device *dev = ice_pf_to_dev(pf);
2390
2391 /* bail out if earlier reset has failed */
7e408e07 2392 if (test_bit(ICE_RESET_FAILED, pf->state)) {
87324e74
HT
2393 dev_dbg(dev, "earlier reset has failed\n");
2394 return -EIO;
2395 }
2396 /* bail if reset/recovery already in progress */
2397 if (ice_is_reset_in_progress(pf->state)) {
2398 dev_dbg(dev, "Reset already in progress\n");
2399 return -EBUSY;
2400 }
2401
f9f5301e
DE
2402 ice_unplug_aux_dev(pf);
2403
87324e74
HT
2404 switch (reset) {
2405 case ICE_RESET_PFR:
7e408e07 2406 set_bit(ICE_PFR_REQ, pf->state);
87324e74
HT
2407 break;
2408 case ICE_RESET_CORER:
7e408e07 2409 set_bit(ICE_CORER_REQ, pf->state);
87324e74
HT
2410 break;
2411 case ICE_RESET_GLOBR:
7e408e07 2412 set_bit(ICE_GLOBR_REQ, pf->state);
87324e74
HT
2413 break;
2414 default:
2415 return -EINVAL;
2416 }
2417
2418 ice_service_task_schedule(pf);
2419 return 0;
2420}
2421
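/* Editor's sketch: a hypothetical error path requesting a PF reset would do:
 *
 *	if (ice_schedule_reset(pf, ICE_RESET_PFR))
 *		dev_dbg(ice_pf_to_dev(pf), "could not schedule PFR\n");
 *
 * The actual reset is then performed asynchronously by the service task via
 * ice_reset_subtask().
 */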
cdedef59
AV
2422/**
2423 * ice_irq_affinity_notify - Callback for affinity changes
2424 * @notify: context as to what irq was changed
2425 * @mask: the new affinity mask
2426 *
2427 * This is a callback function used by the irq_set_affinity_notifier function
2428 * so that we may register to receive changes to the irq affinity masks.
2429 */
c8b7abdd
BA
2430static void
2431ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2432 const cpumask_t *mask)
cdedef59
AV
2433{
2434 struct ice_q_vector *q_vector =
2435 container_of(notify, struct ice_q_vector, affinity_notify);
2436
2437 cpumask_copy(&q_vector->affinity_mask, mask);
2438}
2439
2440/**
2441 * ice_irq_affinity_release - Callback for affinity notifier release
2442 * @ref: internal core kernel usage
2443 *
2444 * This is a callback function used by the irq_set_affinity_notifier function
2445 * to inform the current notification subscriber that they will no longer
2446 * receive notifications.
2447 */
2448static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2449
cdedef59
AV
2450/**
2451 * ice_vsi_ena_irq - Enable IRQ for the given VSI
2452 * @vsi: the VSI being configured
2453 */
2454static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2455{
ba880734
BC
2456 struct ice_hw *hw = &vsi->back->hw;
2457 int i;
cdedef59 2458
ba880734
BC
2459 ice_for_each_q_vector(vsi, i)
2460 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
cdedef59
AV
2461
2462 ice_flush(hw);
2463 return 0;
2464}
2465
cdedef59
AV
2466/**
2467 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2468 * @vsi: the VSI being configured
2469 * @basename: name for the vector
2470 */
2471static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2472{
2473 int q_vectors = vsi->num_q_vectors;
2474 struct ice_pf *pf = vsi->back;
cbe66bfe 2475 int base = vsi->base_vector;
4015d11e 2476 struct device *dev;
cdedef59
AV
2477 int rx_int_idx = 0;
2478 int tx_int_idx = 0;
2479 int vector, err;
2480 int irq_num;
2481
4015d11e 2482 dev = ice_pf_to_dev(pf);
cdedef59
AV
2483 for (vector = 0; vector < q_vectors; vector++) {
2484 struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2485
2486 irq_num = pf->msix_entries[base + vector].vector;
2487
e72bba21 2488 if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
cdedef59
AV
2489 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2490 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2491 tx_int_idx++;
e72bba21 2492 } else if (q_vector->rx.rx_ring) {
cdedef59
AV
2493 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2494 "%s-%s-%d", basename, "rx", rx_int_idx++);
e72bba21 2495 } else if (q_vector->tx.tx_ring) {
cdedef59
AV
2496 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2497 "%s-%s-%d", basename, "tx", tx_int_idx++);
2498 } else {
2499 /* skip this unused q_vector */
2500 continue;
2501 }
b03d519d 2502 if (vsi->type == ICE_VSI_CTRL && vsi->vf)
da62c5ff
QZ
2503 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2504 IRQF_SHARED, q_vector->name,
2505 q_vector);
2506 else
2507 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2508 0, q_vector->name, q_vector);
cdedef59 2509 if (err) {
19cce2c6
AV
2510 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2511 err);
cdedef59
AV
2512 goto free_q_irqs;
2513 }
2514
2515 /* register for affinity change notifications */
28bf2672
BC
2516 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2517 struct irq_affinity_notify *affinity_notify;
2518
2519 affinity_notify = &q_vector->affinity_notify;
2520 affinity_notify->notify = ice_irq_affinity_notify;
2521 affinity_notify->release = ice_irq_affinity_release;
2522 irq_set_affinity_notifier(irq_num, affinity_notify);
2523 }
cdedef59
AV
2524
2525 /* assign the mask for this irq */
2526 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2527 }
2528
d7442f51
AL
2529 err = ice_set_cpu_rx_rmap(vsi);
2530 if (err) {
2531 netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2532 vsi->vsi_num, ERR_PTR(err));
2533 goto free_q_irqs;
2534 }
2535
cdedef59
AV
2536 vsi->irqs_ready = true;
2537 return 0;
2538
2539free_q_irqs:
2540 while (vector) {
2541 vector--;
28bf2672
BC
2542 irq_num = pf->msix_entries[base + vector].vector;
2543 if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2544 irq_set_affinity_notifier(irq_num, NULL);
cdedef59 2545 irq_set_affinity_hint(irq_num, NULL);
4015d11e 2546 devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
cdedef59
AV
2547 }
2548 return err;
2549}
2550
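/* Editor's note: with a basename of "ice-eth0" (illustrative only) the loop
 * above produces vector names such as "ice-eth0-TxRx-0", "ice-eth0-TxRx-1",
 * ..., which is what shows up in /proc/interrupts for each queue pair.
 */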
efc2214b
MF
2551/**
2552 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2553 * @vsi: VSI to setup Tx rings used by XDP
2554 *
2555 * Return 0 on success and negative value on error
2556 */
2557static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2558{
9a946843 2559 struct device *dev = ice_pf_to_dev(vsi->back);
9610bd98
MF
2560 struct ice_tx_desc *tx_desc;
2561 int i, j;
efc2214b 2562
2faf63b6 2563 ice_for_each_xdp_txq(vsi, i) {
efc2214b 2564 u16 xdp_q_idx = vsi->alloc_txq + i;
e72bba21 2565 struct ice_tx_ring *xdp_ring;
efc2214b
MF
2566
2567 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2568
2569 if (!xdp_ring)
2570 goto free_xdp_rings;
2571
2572 xdp_ring->q_index = xdp_q_idx;
2573 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
efc2214b
MF
2574 xdp_ring->vsi = vsi;
2575 xdp_ring->netdev = NULL;
2576 xdp_ring->dev = dev;
2577 xdp_ring->count = vsi->num_tx_desc;
3dd411ef
MF
2578 xdp_ring->next_dd = ICE_RING_QUARTER(xdp_ring) - 1;
2579 xdp_ring->next_rs = ICE_RING_QUARTER(xdp_ring) - 1;
b1d95cc2 2580 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
efc2214b
MF
2581 if (ice_setup_tx_ring(xdp_ring))
2582 goto free_xdp_rings;
2583 ice_set_ring_xdp(xdp_ring);
22bf877e 2584 spin_lock_init(&xdp_ring->tx_lock);
9610bd98
MF
2585 for (j = 0; j < xdp_ring->count; j++) {
2586 tx_desc = ICE_TX_DESC(xdp_ring, j);
e19778e6 2587 tx_desc->cmd_type_offset_bsz = 0;
9610bd98 2588 }
efc2214b
MF
2589 }
2590
2591 return 0;
2592
2593free_xdp_rings:
2594 for (; i >= 0; i--)
2595 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2596 ice_free_tx_ring(vsi->xdp_rings[i]);
2597 return -ENOMEM;
2598}
2599
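/* Editor's worked example (illustrative numbers): ICE_RING_QUARTER() is a
 * quarter of the descriptor count, so with vsi->num_tx_desc = 512 each XDP
 * ring starts with next_dd = next_rs = 127, i.e. descriptors are cleaned and
 * RS-marked in batches of 128.
 */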
2600/**
2601 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2602 * @vsi: VSI to set the bpf prog on
2603 * @prog: the bpf prog pointer
2604 */
2605static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2606{
2607 struct bpf_prog *old_prog;
2608 int i;
2609
2610 old_prog = xchg(&vsi->xdp_prog, prog);
2611 if (old_prog)
2612 bpf_prog_put(old_prog);
2613
2614 ice_for_each_rxq(vsi, i)
2615 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2616}
2617
2618/**
2619 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2620 * @vsi: VSI to bring up Tx rings used by XDP
2621 * @prog: bpf program that will be assigned to VSI
2622 *
2623 * Return 0 on success and negative value on error
2624 */
2625int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2626{
2627 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2628 int xdp_rings_rem = vsi->num_xdp_txq;
2629 struct ice_pf *pf = vsi->back;
2630 struct ice_qs_cfg xdp_qs_cfg = {
2631 .qs_mutex = &pf->avail_q_mutex,
2632 .pf_map = pf->avail_txqs,
2633 .pf_map_size = pf->max_pf_txqs,
2634 .q_count = vsi->num_xdp_txq,
2635 .scatter_count = ICE_MAX_SCATTER_TXQS,
2636 .vsi_map = vsi->txq_map,
2637 .vsi_map_offset = vsi->alloc_txq,
2638 .mapping_mode = ICE_VSI_MAP_CONTIG
2639 };
4015d11e 2640 struct device *dev;
efc2214b 2641 int i, v_idx;
5518ac2a 2642 int status;
efc2214b 2643
4015d11e
BC
2644 dev = ice_pf_to_dev(pf);
2645 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
efc2214b
MF
2646 sizeof(*vsi->xdp_rings), GFP_KERNEL);
2647 if (!vsi->xdp_rings)
2648 return -ENOMEM;
2649
2650 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2651 if (__ice_vsi_get_qs(&xdp_qs_cfg))
2652 goto err_map_xdp;
2653
22bf877e
MF
2654 if (static_key_enabled(&ice_xdp_locking_key))
2655 netdev_warn(vsi->netdev,
2656 "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2657
efc2214b
MF
2658 if (ice_xdp_alloc_setup_rings(vsi))
2659 goto clear_xdp_rings;
2660
2661 /* follow the logic from ice_vsi_map_rings_to_vectors */
2662 ice_for_each_q_vector(vsi, v_idx) {
2663 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2664 int xdp_rings_per_v, q_id, q_base;
2665
2666 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2667 vsi->num_q_vectors - v_idx);
2668 q_base = vsi->num_xdp_txq - xdp_rings_rem;
2669
2670 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
e72bba21 2671 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
efc2214b
MF
2672
2673 xdp_ring->q_vector = q_vector;
e72bba21
MF
2674 xdp_ring->next = q_vector->tx.tx_ring;
2675 q_vector->tx.tx_ring = xdp_ring;
efc2214b
MF
2676 }
2677 xdp_rings_rem -= xdp_rings_per_v;
2678 }
2679
9ead7e74
MF
2680 ice_for_each_rxq(vsi, i) {
2681 if (static_key_enabled(&ice_xdp_locking_key)) {
2682 vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
2683 } else {
2684 struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
2685 struct ice_tx_ring *ring;
2686
2687 ice_for_each_tx_ring(ring, q_vector->tx) {
2688 if (ice_ring_is_xdp(ring)) {
2689 vsi->rx_rings[i]->xdp_ring = ring;
2690 break;
2691 }
2692 }
2693 }
2694 ice_tx_xsk_pool(vsi, i);
2695 }
2696
efc2214b
MF
2697 /* omit the scheduler update if in reset path; XDP queues will be
2698 * taken into account at the end of ice_vsi_rebuild, where
2699 * ice_cfg_vsi_lan is being called
2700 */
2701 if (ice_is_reset_in_progress(pf->state))
2702 return 0;
2703
2704 /* tell the Tx scheduler that right now we have
2705 * additional queues
2706 */
2707 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2708 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2709
2710 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2711 max_txqs);
2712 if (status) {
5f87ec48
TN
2713 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2714 status);
efc2214b
MF
2715 goto clear_xdp_rings;
2716 }
f65ee535
MP
2717
2718 /* assign the prog only when it's not already present on VSI;
2719 * this flow is subject to both ethtool -L and ndo_bpf flows;
2720 * VSI rebuild that happens under ethtool -L can expose us to
2721 * the bpf_prog refcount issues as we would be swapping same
2722 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
2723 * on it as it would be treated as an 'old_prog'; for ndo_bpf
2724 * this is not harmful as dev_xdp_install bumps the refcount
2725 * before calling the op exposed by the driver;
2726 */
2727 if (!ice_is_xdp_ena_vsi(vsi))
2728 ice_vsi_assign_bpf_prog(vsi, prog);
efc2214b
MF
2729
2730 return 0;
2731clear_xdp_rings:
2faf63b6 2732 ice_for_each_xdp_txq(vsi, i)
efc2214b
MF
2733 if (vsi->xdp_rings[i]) {
2734 kfree_rcu(vsi->xdp_rings[i], rcu);
2735 vsi->xdp_rings[i] = NULL;
2736 }
2737
2738err_map_xdp:
2739 mutex_lock(&pf->avail_q_mutex);
2faf63b6 2740 ice_for_each_xdp_txq(vsi, i) {
efc2214b
MF
2741 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2742 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2743 }
2744 mutex_unlock(&pf->avail_q_mutex);
2745
4015d11e 2746 devm_kfree(dev, vsi->xdp_rings);
efc2214b
MF
2747 return -ENOMEM;
2748}
2749
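/* Editor's worked example for the vector mapping loop above (illustrative
 * numbers): with num_xdp_txq = 8 and num_q_vectors = 3 the DIV_ROUND_UP()
 * split assigns:
 *
 *	v_idx 0: rem = 8, per_v = DIV_ROUND_UP(8, 3) = 3 -> XDP rings 0..2
 *	v_idx 1: rem = 5, per_v = DIV_ROUND_UP(5, 2) = 3 -> XDP rings 3..5
 *	v_idx 2: rem = 2, per_v = DIV_ROUND_UP(2, 1) = 2 -> XDP rings 6..7
 */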
2750/**
2751 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2752 * @vsi: VSI to remove XDP rings
2753 *
2754 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2755 * resources
2756 */
2757int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2758{
2759 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2760 struct ice_pf *pf = vsi->back;
2761 int i, v_idx;
2762
2763 /* q_vectors are freed in reset path so there's no point in detaching
ac382a09 2764 * rings; in case of rebuild being triggered not from reset bits
efc2214b
MF
2765 * in pf->state won't be set, so additionally check first q_vector
2766 * against NULL
2767 */
2768 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2769 goto free_qmap;
2770
2771 ice_for_each_q_vector(vsi, v_idx) {
2772 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
e72bba21 2773 struct ice_tx_ring *ring;
efc2214b 2774
e72bba21 2775 ice_for_each_tx_ring(ring, q_vector->tx)
efc2214b
MF
2776 if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2777 break;
2778
2779 /* restore the value of last node prior to XDP setup */
e72bba21 2780 q_vector->tx.tx_ring = ring;
efc2214b
MF
2781 }
2782
2783free_qmap:
2784 mutex_lock(&pf->avail_q_mutex);
2faf63b6 2785 ice_for_each_xdp_txq(vsi, i) {
efc2214b
MF
2786 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2787 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2788 }
2789 mutex_unlock(&pf->avail_q_mutex);
2790
2faf63b6 2791 ice_for_each_xdp_txq(vsi, i)
efc2214b 2792 if (vsi->xdp_rings[i]) {
f9124c68
MF
2793 if (vsi->xdp_rings[i]->desc) {
2794 synchronize_rcu();
efc2214b 2795 ice_free_tx_ring(vsi->xdp_rings[i]);
f9124c68 2796 }
efc2214b
MF
2797 kfree_rcu(vsi->xdp_rings[i], rcu);
2798 vsi->xdp_rings[i] = NULL;
2799 }
2800
4015d11e 2801 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
efc2214b
MF
2802 vsi->xdp_rings = NULL;
2803
22bf877e
MF
2804 if (static_key_enabled(&ice_xdp_locking_key))
2805 static_branch_dec(&ice_xdp_locking_key);
2806
efc2214b
MF
2807 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2808 return 0;
2809
2810 ice_vsi_assign_bpf_prog(vsi, NULL);
2811
2812 /* notify Tx scheduler that we destroyed XDP queues and bring
2813 * back the old number of child nodes
2814 */
2815 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2816 max_txqs[i] = vsi->num_txq;
2817
c8f135c6
MP
2818 /* change number of XDP Tx queues to 0 */
2819 vsi->num_xdp_txq = 0;
2820
efc2214b
MF
2821 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2822 max_txqs);
2823}
2824
c7a21904
MS
2825/**
2826 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2827 * @vsi: VSI to schedule napi on
2828 */
2829static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2830{
2831 int i;
2832
2833 ice_for_each_rxq(vsi, i) {
e72bba21 2834 struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
c7a21904
MS
2835
2836 if (rx_ring->xsk_pool)
2837 napi_schedule(&rx_ring->q_vector->napi);
2838 }
2839}
2840
22bf877e
MF
2841/**
2842 * ice_vsi_determine_xdp_res - figure out how many Tx queues XDP can have
2843 * @vsi: VSI to determine the count of XDP Tx qs
2844 *
2845 * Returns 0 if the number of available Tx queues is at least half the CPU count,
2846 * -ENOMEM otherwise
2847 */
2848int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2849{
2850 u16 avail = ice_get_avail_txq_count(vsi->back);
2851 u16 cpus = num_possible_cpus();
2852
2853 if (avail < cpus / 2)
2854 return -ENOMEM;
2855
2856 vsi->num_xdp_txq = min_t(u16, avail, cpus);
2857
2858 if (vsi->num_xdp_txq < cpus)
2859 static_branch_inc(&ice_xdp_locking_key);
2860
2861 return 0;
2862}
2863
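/* Editor's worked example (illustrative numbers): with 16 possible CPUs and
 * only 10 Tx queues left, 10 >= 16 / 2 so the check passes, num_xdp_txq
 * becomes min(10, 16) = 10, and since 10 < 16 the ice_xdp_locking_key static
 * branch is enabled so multiple CPUs may share an XDP ring under its
 * tx_lock.
 */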
efc2214b
MF
2864/**
2865 * ice_xdp_setup_prog - Add or remove XDP eBPF program
2866 * @vsi: VSI to setup XDP for
2867 * @prog: XDP program
2868 * @extack: netlink extended ack
2869 */
2870static int
2871ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2872 struct netlink_ext_ack *extack)
2873{
2874 int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2875 bool if_running = netif_running(vsi->netdev);
2876 int ret = 0, xdp_ring_err = 0;
2877
2878 if (frame_size > vsi->rx_buf_len) {
2879 NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
2880 return -EOPNOTSUPP;
2881 }
2882
2883 /* need to stop netdev while setting up the program for Rx rings */
e97fb1ae 2884 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
efc2214b
MF
2885 ret = ice_down(vsi);
2886 if (ret) {
af23635a 2887 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
efc2214b
MF
2888 return ret;
2889 }
2890 }
2891
2892 if (!ice_is_xdp_ena_vsi(vsi) && prog) {
22bf877e
MF
2893 xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
2894 if (xdp_ring_err) {
2895 NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
2896 } else {
2897 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2898 if (xdp_ring_err)
2899 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2900 }
efc2214b
MF
2901 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2902 xdp_ring_err = ice_destroy_xdp_rings(vsi);
2903 if (xdp_ring_err)
af23635a 2904 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
efc2214b 2905 } else {
f65ee535
MP
2906 /* safe to call even when prog == vsi->xdp_prog as
2907 * dev_xdp_install in net/core/dev.c incremented prog's
2908 * refcount so corresponding bpf_prog_put won't cause
2909 * underflow
2910 */
efc2214b
MF
2911 ice_vsi_assign_bpf_prog(vsi, prog);
2912 }
2913
2914 if (if_running)
2915 ret = ice_up(vsi);
2916
c7a21904
MS
2917 if (!ret && prog)
2918 ice_vsi_rx_napi_schedule(vsi);
2d4238f5 2919
efc2214b
MF
2920 return (ret || xdp_ring_err) ? -ENOMEM : 0;
2921}
2922
ebc5399e
MF
2923/**
2924 * ice_xdp_safe_mode - XDP handler for safe mode
2925 * @dev: netdevice
2926 * @xdp: XDP command
2927 */
2928static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2929 struct netdev_bpf *xdp)
2930{
2931 NL_SET_ERR_MSG_MOD(xdp->extack,
2932 "Please provide working DDP firmware package in order to use XDP\n"
2933 "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
2934 return -EOPNOTSUPP;
2935}
2936
efc2214b
MF
2937/**
2938 * ice_xdp - implements XDP handler
2939 * @dev: netdevice
2940 * @xdp: XDP command
2941 */
2942static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2943{
2944 struct ice_netdev_priv *np = netdev_priv(dev);
2945 struct ice_vsi *vsi = np->vsi;
2946
2947 if (vsi->type != ICE_VSI_PF) {
af23635a 2948 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
efc2214b
MF
2949 return -EINVAL;
2950 }
2951
2952 switch (xdp->command) {
2953 case XDP_SETUP_PROG:
2954 return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
1742b3d5
MK
2955 case XDP_SETUP_XSK_POOL:
2956 return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
2d4238f5 2957 xdp->xsk.queue_id);
efc2214b
MF
2958 default:
2959 return -EINVAL;
2960 }
2961}
2962
940b61af
AV
2963/**
2964 * ice_ena_misc_vector - enable the non-queue interrupts
2965 * @pf: board private structure
2966 */
2967static void ice_ena_misc_vector(struct ice_pf *pf)
2968{
2969 struct ice_hw *hw = &pf->hw;
2970 u32 val;
2971
9d5c5a52
PG
2972 /* Disable anti-spoof detection interrupt to prevent spurious event
2973 * interrupts during a function reset. Anti-spoof functionally is
2974 * still supported.
2975 */
2976 val = rd32(hw, GL_MDCK_TX_TDPU);
2977 val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
2978 wr32(hw, GL_MDCK_TX_TDPU, val);
2979
940b61af
AV
2980 /* clear things first */
2981 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
2982 rd32(hw, PFINT_OICR); /* read to clear */
2983
3bcd7fa3 2984 val = (PFINT_OICR_ECC_ERR_M |
940b61af
AV
2985 PFINT_OICR_MAL_DETECT_M |
2986 PFINT_OICR_GRST_M |
2987 PFINT_OICR_PCI_EXCEPTION_M |
007676b4 2988 PFINT_OICR_VFLR_M |
3bcd7fa3 2989 PFINT_OICR_HMC_ERR_M |
348048e7 2990 PFINT_OICR_PE_PUSH_M |
3bcd7fa3 2991 PFINT_OICR_PE_CRITERR_M);
940b61af
AV
2992
2993 wr32(hw, PFINT_OICR_ENA, val);
2994
2995 /* SW_ITR_IDX = 0, but don't change INTENA */
cbe66bfe 2996 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
940b61af
AV
2997 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
2998}
2999
3000/**
3001 * ice_misc_intr - misc interrupt handler
3002 * @irq: interrupt number
3003 * @data: pointer to a q_vector
3004 */
3005static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3006{
3007 struct ice_pf *pf = (struct ice_pf *)data;
3008 struct ice_hw *hw = &pf->hw;
3009 irqreturn_t ret = IRQ_NONE;
4015d11e 3010 struct device *dev;
940b61af
AV
3011 u32 oicr, ena_mask;
3012
4015d11e 3013 dev = ice_pf_to_dev(pf);
7e408e07
AV
3014 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3015 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
8f5ee3c4 3016 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
940b61af
AV
3017
3018 oicr = rd32(hw, PFINT_OICR);
3019 ena_mask = rd32(hw, PFINT_OICR_ENA);
3020
0e674aeb
AV
3021 if (oicr & PFINT_OICR_SWINT_M) {
3022 ena_mask &= ~PFINT_OICR_SWINT_M;
3023 pf->sw_int_count++;
3024 }
3025
b3969fd7
SM
3026 if (oicr & PFINT_OICR_MAL_DETECT_M) {
3027 ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
7e408e07 3028 set_bit(ICE_MDD_EVENT_PENDING, pf->state);
b3969fd7 3029 }
007676b4 3030 if (oicr & PFINT_OICR_VFLR_M) {
f844d521 3031 /* disable any further VFLR event notifications */
7e408e07 3032 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
f844d521
BC
3033 u32 reg = rd32(hw, PFINT_OICR_ENA);
3034
3035 reg &= ~PFINT_OICR_VFLR_M;
3036 wr32(hw, PFINT_OICR_ENA, reg);
3037 } else {
3038 ena_mask &= ~PFINT_OICR_VFLR_M;
7e408e07 3039 set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
f844d521 3040 }
007676b4 3041 }
b3969fd7 3042
0b28b702
AV
3043 if (oicr & PFINT_OICR_GRST_M) {
3044 u32 reset;
b3969fd7 3045
0b28b702
AV
3046 /* we have a reset warning */
3047 ena_mask &= ~PFINT_OICR_GRST_M;
3048 reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
3049 GLGEN_RSTAT_RESET_TYPE_S;
3050
3051 if (reset == ICE_RESET_CORER)
3052 pf->corer_count++;
3053 else if (reset == ICE_RESET_GLOBR)
3054 pf->globr_count++;
ca4929b6 3055 else if (reset == ICE_RESET_EMPR)
0b28b702 3056 pf->empr_count++;
ca4929b6 3057 else
4015d11e 3058 dev_dbg(dev, "Invalid reset type %d\n", reset);
0b28b702
AV
3059
3060 /* If a reset cycle isn't already in progress, we set a bit in
3061 * pf->state so that the service task can start a reset/rebuild.
0b28b702 3062 */
7e408e07 3063 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
0b28b702 3064 if (reset == ICE_RESET_CORER)
7e408e07 3065 set_bit(ICE_CORER_RECV, pf->state);
0b28b702 3066 else if (reset == ICE_RESET_GLOBR)
7e408e07 3067 set_bit(ICE_GLOBR_RECV, pf->state);
0b28b702 3068 else
7e408e07 3069 set_bit(ICE_EMPR_RECV, pf->state);
0b28b702 3070
fd2a9817
AV
3071 /* There are couple of different bits at play here.
3072 * hw->reset_ongoing indicates whether the hardware is
3073 * in reset. This is set to true when a reset interrupt
3074 * is received and set back to false after the driver
3075 * has determined that the hardware is out of reset.
3076 *
7e408e07 3077 * ICE_RESET_OICR_RECV in pf->state indicates
fd2a9817
AV
3078 * that a post reset rebuild is required before the
3079 * driver is operational again. This is set above.
3080 *
3081 * As this is the start of the reset/rebuild cycle, set
3082 * both to indicate that.
3083 */
3084 hw->reset_ongoing = true;
0b28b702
AV
3085 }
3086 }
3087
ea9b847c
JK
3088 if (oicr & PFINT_OICR_TSYN_TX_M) {
3089 ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3090 ice_ptp_process_ts(pf);
3091 }
3092
172db5f9
MM
3093 if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3094 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3095 u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3096
3097 /* Save EVENTs from GTSYN register */
3098 pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
3099 GLTSYN_STAT_EVENT1_M |
3100 GLTSYN_STAT_EVENT2_M);
3101 ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3102 kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
3103 }
3104
348048e7
DE
3105#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3106 if (oicr & ICE_AUX_CRIT_ERR) {
32d53c0a
AL
3107 pf->oicr_err_reg |= oicr;
3108 set_bit(ICE_AUX_ERR_PENDING, pf->state);
348048e7 3109 ena_mask &= ~ICE_AUX_CRIT_ERR;
940b61af
AV
3110 }
3111
8d7189d2 3112 /* Report any remaining unexpected interrupts */
940b61af
AV
3113 oicr &= ena_mask;
3114 if (oicr) {
4015d11e 3115 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
940b61af
AV
3116 /* If a critical error is pending there is no choice but to
3117 * reset the device.
3118 */
348048e7 3119 if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
0b28b702 3120 PFINT_OICR_ECC_ERR_M)) {
7e408e07 3121 set_bit(ICE_PFR_REQ, pf->state);
0b28b702
AV
3122 ice_service_task_schedule(pf);
3123 }
940b61af
AV
3124 }
3125 ret = IRQ_HANDLED;
3126
de75135b
AV
3127 ice_service_task_schedule(pf);
3128 ice_irq_dynamic_ena(hw, NULL, NULL);
940b61af
AV
3129
3130 return ret;
3131}
3132
0e04e8e1
BC
3133/**
3134 * ice_dis_ctrlq_interrupts - disable control queue interrupts
3135 * @hw: pointer to HW structure
3136 */
3137static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3138{
3139 /* disable Admin queue Interrupt causes */
3140 wr32(hw, PFINT_FW_CTL,
3141 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3142
3143 /* disable Mailbox queue Interrupt causes */
3144 wr32(hw, PFINT_MBX_CTL,
3145 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3146
8f5ee3c4
JK
3147 wr32(hw, PFINT_SB_CTL,
3148 rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3149
0e04e8e1
BC
3150 /* disable Control queue Interrupt causes */
3151 wr32(hw, PFINT_OICR_CTL,
3152 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3153
3154 ice_flush(hw);
3155}
3156
940b61af
AV
3157/**
3158 * ice_free_irq_msix_misc - Unroll misc vector setup
3159 * @pf: board private structure
3160 */
3161static void ice_free_irq_msix_misc(struct ice_pf *pf)
3162{
0e04e8e1
BC
3163 struct ice_hw *hw = &pf->hw;
3164
3165 ice_dis_ctrlq_interrupts(hw);
3166
940b61af 3167 /* disable OICR interrupt */
0e04e8e1
BC
3168 wr32(hw, PFINT_OICR_ENA, 0);
3169 ice_flush(hw);
940b61af 3170
ba880734 3171 if (pf->msix_entries) {
cbe66bfe 3172 synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
4015d11e 3173 devm_free_irq(ice_pf_to_dev(pf),
cbe66bfe 3174 pf->msix_entries[pf->oicr_idx].vector, pf);
940b61af
AV
3175 }
3176
eb0208ec 3177 pf->num_avail_sw_msix += 1;
cbe66bfe 3178 ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
940b61af
AV
3179}
3180
0e04e8e1
BC
3181/**
3182 * ice_ena_ctrlq_interrupts - enable control queue interrupts
3183 * @hw: pointer to HW structure
b07833a0 3184 * @reg_idx: HW vector index to associate the control queue interrupts with
0e04e8e1 3185 */
b07833a0 3186static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
0e04e8e1
BC
3187{
3188 u32 val;
3189
b07833a0 3190 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
0e04e8e1
BC
3191 PFINT_OICR_CTL_CAUSE_ENA_M);
3192 wr32(hw, PFINT_OICR_CTL, val);
3193
3194 /* enable Admin queue Interrupt causes */
b07833a0 3195 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
0e04e8e1
BC
3196 PFINT_FW_CTL_CAUSE_ENA_M);
3197 wr32(hw, PFINT_FW_CTL, val);
3198
3199 /* enable Mailbox queue Interrupt causes */
b07833a0 3200 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
0e04e8e1
BC
3201 PFINT_MBX_CTL_CAUSE_ENA_M);
3202 wr32(hw, PFINT_MBX_CTL, val);
3203
8f5ee3c4
JK
3204 /* This enables Sideband queue Interrupt causes */
3205 val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3206 PFINT_SB_CTL_CAUSE_ENA_M);
3207 wr32(hw, PFINT_SB_CTL, val);
3208
0e04e8e1
BC
3209 ice_flush(hw);
3210}
3211
940b61af
AV
3212/**
3213 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3214 * @pf: board private structure
3215 *
3216 * This sets up the handler for MSIX 0, which is used to manage the
df17b7e0 3217 * non-queue interrupts, e.g. AdminQ and errors. This is not used
940b61af
AV
3218 * when in MSI or Legacy interrupt mode.
3219 */
3220static int ice_req_irq_msix_misc(struct ice_pf *pf)
3221{
4015d11e 3222 struct device *dev = ice_pf_to_dev(pf);
940b61af
AV
3223 struct ice_hw *hw = &pf->hw;
3224 int oicr_idx, err = 0;
940b61af
AV
3225
3226 if (!pf->int_name[0])
3227 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
4015d11e 3228 dev_driver_string(dev), dev_name(dev));
940b61af 3229
0b28b702
AV
3230 /* Do not request IRQ but do enable OICR interrupt since settings are
3231 * lost during reset. Note that this function is called only during
3232 * rebuild path and not while reset is in progress.
3233 */
5df7e45d 3234 if (ice_is_reset_in_progress(pf->state))
0b28b702
AV
3235 goto skip_req_irq;
3236
cbe66bfe
BC
3237 /* reserve one vector in irq_tracker for misc interrupts */
3238 oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
940b61af
AV
3239 if (oicr_idx < 0)
3240 return oicr_idx;
3241
eb0208ec 3242 pf->num_avail_sw_msix -= 1;
88865fc4 3243 pf->oicr_idx = (u16)oicr_idx;
940b61af 3244
4015d11e 3245 err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
940b61af
AV
3246 ice_misc_intr, 0, pf->int_name, pf);
3247 if (err) {
4015d11e 3248 dev_err(dev, "devm_request_irq for %s failed: %d\n",
940b61af 3249 pf->int_name, err);
cbe66bfe 3250 ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
eb0208ec 3251 pf->num_avail_sw_msix += 1;
940b61af
AV
3252 return err;
3253 }
3254
0b28b702 3255skip_req_irq:
940b61af
AV
3256 ice_ena_misc_vector(pf);
3257
cbe66bfe
BC
3258 ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
3259 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
63f545ed 3260 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
940b61af
AV
3261
3262 ice_flush(hw);
cdedef59 3263 ice_irq_dynamic_ena(hw, NULL, NULL);
940b61af
AV
3264
3265 return 0;
3266}
3267
3a858ba3 3268/**
df0f8479
AV
3269 * ice_napi_add - register NAPI handler for the VSI
3270 * @vsi: VSI for which NAPI handler is to be registered
3a858ba3 3271 *
df0f8479
AV
3272 * This function is only called in the driver's load path. Registering the NAPI
3273 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3274 * reset/rebuild, etc.)
3a858ba3 3275 */
df0f8479 3276static void ice_napi_add(struct ice_vsi *vsi)
3a858ba3 3277{
df0f8479 3278 int v_idx;
3a858ba3 3279
df0f8479 3280 if (!vsi->netdev)
3a858ba3 3281 return;
3a858ba3 3282
0c2561c8 3283 ice_for_each_q_vector(vsi, v_idx)
df0f8479
AV
3284 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3285 ice_napi_poll, NAPI_POLL_WEIGHT);
3a858ba3
AV
3286}
3287
3288/**
462acf6a
TN
3289 * ice_set_ops - set netdev and ethtool ops for the given netdev
3290 * @netdev: netdev instance
3a858ba3 3291 */
462acf6a 3292static void ice_set_ops(struct net_device *netdev)
3a858ba3 3293{
462acf6a
TN
3294 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3295
3296 if (ice_is_safe_mode(pf)) {
3297 netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3298 ice_set_ethtool_safe_mode_ops(netdev);
3299 return;
3300 }
3301
3302 netdev->netdev_ops = &ice_netdev_ops;
b20e6c17 3303 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
462acf6a
TN
3304 ice_set_ethtool_ops(netdev);
3305}
3306
3307/**
3308 * ice_set_netdev_features - set features for the given netdev
3309 * @netdev: netdev instance
3310 */
3311static void ice_set_netdev_features(struct net_device *netdev)
3312{
3313 struct ice_pf *pf = ice_netdev_to_pf(netdev);
1babaf77 3314 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
d76a60ba
AV
3315 netdev_features_t csumo_features;
3316 netdev_features_t vlano_features;
3317 netdev_features_t dflt_features;
3318 netdev_features_t tso_features;
3a858ba3 3319
462acf6a
TN
3320 if (ice_is_safe_mode(pf)) {
3321 /* safe mode */
3322 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3323 netdev->hw_features = netdev->features;
3324 return;
3325 }
3a858ba3 3326
d76a60ba
AV
3327 dflt_features = NETIF_F_SG |
3328 NETIF_F_HIGHDMA |
148beb61 3329 NETIF_F_NTUPLE |
d76a60ba
AV
3330 NETIF_F_RXHASH;
3331
3332 csumo_features = NETIF_F_RXCSUM |
3333 NETIF_F_IP_CSUM |
cf909e19 3334 NETIF_F_SCTP_CRC |
d76a60ba
AV
3335 NETIF_F_IPV6_CSUM;
3336
3337 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3338 NETIF_F_HW_VLAN_CTAG_TX |
3339 NETIF_F_HW_VLAN_CTAG_RX;
3340
1babaf77
BC
3341 /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
3342 if (is_dvm_ena)
3343 vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
3344
a4e82a81
TN
3345 tso_features = NETIF_F_TSO |
3346 NETIF_F_TSO_ECN |
3347 NETIF_F_TSO6 |
3348 NETIF_F_GSO_GRE |
3349 NETIF_F_GSO_UDP_TUNNEL |
3350 NETIF_F_GSO_GRE_CSUM |
3351 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3352 NETIF_F_GSO_PARTIAL |
3353 NETIF_F_GSO_IPXIP4 |
3354 NETIF_F_GSO_IPXIP6 |
a54e3b8c 3355 NETIF_F_GSO_UDP_L4;
d76a60ba 3356
a4e82a81
TN
3357 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3358 NETIF_F_GSO_GRE_CSUM;
3a858ba3 3359 /* set features that user can change */
d76a60ba
AV
3360 netdev->hw_features = dflt_features | csumo_features |
3361 vlano_features | tso_features;
3a858ba3 3362
a4e82a81 3363 /* add support for HW_CSUM on packets with MPLS header */
69e66c04
JD
3364 netdev->mpls_features = NETIF_F_HW_CSUM |
3365 NETIF_F_TSO |
3366 NETIF_F_TSO6;
a4e82a81 3367
3a858ba3
AV
3368 /* enable features */
3369 netdev->features |= netdev->hw_features;
0d08a441
KP
3370
3371 netdev->hw_features |= NETIF_F_HW_TC;
44ece4e1 3372 netdev->hw_features |= NETIF_F_LOOPBACK;
0d08a441 3373
d76a60ba
AV
3374 /* encap and VLAN devices inherit default, csumo and tso features */
3375 netdev->hw_enc_features |= dflt_features | csumo_features |
3376 tso_features;
3377 netdev->vlan_features |= dflt_features | csumo_features |
3378 tso_features;
1babaf77
BC
3379
3380 /* advertise support but don't enable by default since only one type of
3381 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
3382 * type is turned on, the other has to be turned off. This is enforced by the
3383 * ice_fix_features() ndo callback.
3384 */
3385 if (is_dvm_ena)
3386 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
3387 NETIF_F_HW_VLAN_STAG_TX;
462acf6a
TN
3388}
3389
3390/**
3391 * ice_cfg_netdev - Allocate, configure and register a netdev
3392 * @vsi: the VSI associated with the new netdev
3393 *
3394 * Returns 0 on success, negative value on failure
3395 */
3396static int ice_cfg_netdev(struct ice_vsi *vsi)
3397{
462acf6a
TN
3398 struct ice_netdev_priv *np;
3399 struct net_device *netdev;
3400 u8 mac_addr[ETH_ALEN];
1adf7ead 3401
462acf6a
TN
3402 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
3403 vsi->alloc_rxq);
1e23f076
AV
3404 if (!netdev)
3405 return -ENOMEM;
462acf6a 3406
a476d72a 3407 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
462acf6a
TN
3408 vsi->netdev = netdev;
3409 np = netdev_priv(netdev);
3410 np->vsi = vsi;
3411
3412 ice_set_netdev_features(netdev);
3413
3414 ice_set_ops(netdev);
3a858ba3
AV
3415
3416 if (vsi->type == ICE_VSI_PF) {
c73bf3bd 3417 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
3a858ba3 3418 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
f3956ebb 3419 eth_hw_addr_set(netdev, mac_addr);
3a858ba3
AV
3420 ether_addr_copy(netdev->perm_addr, mac_addr);
3421 }
3422
3423 netdev->priv_flags |= IFF_UNICAST_FLT;
3424
462acf6a
TN
3425 /* Setup netdev TC information */
3426 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
cdedef59 3427
3a858ba3
AV
3428 /* set up watchdog timeout value to be 5 seconds */
3429 netdev->watchdog_timeo = 5 * HZ;
3430
3431 netdev->min_mtu = ETH_MIN_MTU;
3432 netdev->max_mtu = ICE_MAX_MTU;
3433
3a858ba3
AV
3434 return 0;
3435}
3436
d76a60ba
AV
3437/**
3438 * ice_fill_rss_lut - Fill the RSS lookup table with default values
3439 * @lut: Lookup table
3440 * @rss_table_size: Lookup table size
3441 * @rss_size: Range of queue number for hashing
3442 */
3443void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3444{
3445 u16 i;
3446
3447 for (i = 0; i < rss_table_size; i++)
3448 lut[i] = i % rss_size;
3449}
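/* Worked example of the fill above: with rss_table_size = 8 and rss_size = 3
 * (illustrative values only), the loop produces
 *   lut = { 0, 1, 2, 0, 1, 2, 0, 1 }
 * i.e. queue indices are assigned round-robin across the whole table.
 */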
3450
0f9d5027
AV
3451/**
3452 * ice_pf_vsi_setup - Set up a PF VSI
3453 * @pf: board private structure
3454 * @pi: pointer to the port_info instance
3455 *
0e674aeb
AV
3456 * Returns pointer to the successfully allocated VSI software struct
3457 * on success, otherwise returns NULL on failure.
0f9d5027
AV
3458 */
3459static struct ice_vsi *
3460ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3461{
b03d519d 3462 return ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL);
0f9d5027
AV
3463}
3464
fbc7b27a
KP
3465static struct ice_vsi *
3466ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3467 struct ice_channel *ch)
3468{
b03d519d 3469 return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, NULL, ch);
fbc7b27a
KP
3470}
3471
148beb61
HT
3472/**
3473 * ice_ctrl_vsi_setup - Set up a control VSI
3474 * @pf: board private structure
3475 * @pi: pointer to the port_info instance
3476 *
3477 * Returns pointer to the successfully allocated VSI software struct
3478 * on success, otherwise returns NULL on failure.
3479 */
3480static struct ice_vsi *
3481ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3482{
b03d519d 3483 return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, NULL, NULL);
148beb61
HT
3484}
3485
0e674aeb
AV
3486/**
3487 * ice_lb_vsi_setup - Set up a loopback VSI
3488 * @pf: board private structure
3489 * @pi: pointer to the port_info instance
3490 *
3491 * Returns pointer to the successfully allocated VSI software struct
3492 * on success, otherwise returns NULL on failure.
3493 */
3494struct ice_vsi *
3495ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3496{
b03d519d 3497 return ice_vsi_setup(pf, pi, ICE_VSI_LB, NULL, NULL);
0e674aeb
AV
3498}
3499
d76a60ba 3500/**
f9867df6 3501 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
d76a60ba 3502 * @netdev: network interface to be adjusted
2bfefa2d 3503 * @proto: VLAN TPID
f9867df6 3504 * @vid: VLAN ID to be added
d76a60ba 3505 *
f9867df6 3506 * net_device_ops implementation for adding VLAN IDs
d76a60ba 3507 */
c8b7abdd 3508static int
2bfefa2d 3509ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
d76a60ba
AV
3510{
3511 struct ice_netdev_priv *np = netdev_priv(netdev);
c31af68a 3512 struct ice_vsi_vlan_ops *vlan_ops;
d76a60ba 3513 struct ice_vsi *vsi = np->vsi;
fb05ba12 3514 struct ice_vlan vlan;
5eda8afd 3515 int ret;
d76a60ba 3516
42f3efef
BC
3517 /* VLAN 0 is added by default during load/reset */
3518 if (!vid)
3519 return 0;
3520
1273f895
IV
3521 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3522 usleep_range(1000, 2000);
3523
3524 /* Add multicast promisc rule for the VLAN ID to be added if
3525 * all-multicast is currently enabled.
3526 */
3527 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3528 ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3529 ICE_MCAST_VLAN_PROMISC_BITS,
3530 vid);
3531 if (ret)
3532 goto finish;
3533 }
3534
c31af68a 3535 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
4f74dcc1 3536
42f3efef
BC
3537 /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3538 * packets aren't pruned by the device's internal switch on Rx
d76a60ba 3539 */
2bfefa2d 3540 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
c31af68a 3541 ret = vlan_ops->add_vlan(vsi, &vlan);
1273f895
IV
3542 if (ret)
3543 goto finish;
3544
3545 /* If all-multicast is currently enabled and this VLAN ID is the only one
3546 * besides VLAN-0, we have to update the look-up type of the multicast promisc
3547 * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
3548 */
3549 if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3550 ice_vsi_num_non_zero_vlans(vsi) == 1) {
3551 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3552 ICE_MCAST_PROMISC_BITS, 0);
3553 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3554 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3555 }
3556
3557finish:
3558 clear_bit(ICE_CFG_BUSY, vsi->state);
5eda8afd
AA
3559
3560 return ret;
d76a60ba
AV
3561}
3562
d76a60ba 3563/**
f9867df6 3564 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
d76a60ba 3565 * @netdev: network interface to be adjusted
2bfefa2d 3566 * @proto: VLAN TPID
f9867df6 3567 * @vid: VLAN ID to be removed
d76a60ba 3568 *
f9867df6 3569 * net_device_ops implementation for removing VLAN IDs
d76a60ba 3570 */
c8b7abdd 3571static int
2bfefa2d 3572ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
d76a60ba
AV
3573{
3574 struct ice_netdev_priv *np = netdev_priv(netdev);
c31af68a 3575 struct ice_vsi_vlan_ops *vlan_ops;
d76a60ba 3576 struct ice_vsi *vsi = np->vsi;
fb05ba12 3577 struct ice_vlan vlan;
5eda8afd 3578 int ret;
d76a60ba 3579
42f3efef
BC
3580 /* don't allow removal of VLAN 0 */
3581 if (!vid)
3582 return 0;
3583
1273f895
IV
3584 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3585 usleep_range(1000, 2000);
3586
abddafd4
GS
3587 ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3588 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3589 if (ret) {
3590 netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
3591 vsi->vsi_num);
3592 vsi->current_netdev_flags |= IFF_ALLMULTI;
3593 }
3594
c31af68a
BC
3595 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3596
bc42afa9 3597 /* Make sure VLAN delete is successful before updating VLAN
4f74dcc1 3598 * information
d76a60ba 3599 */
2bfefa2d 3600 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
c31af68a 3601 ret = vlan_ops->del_vlan(vsi, &vlan);
5eda8afd 3602 if (ret)
1273f895 3603 goto finish;
d76a60ba 3604
1273f895
IV
3605 /* Remove multicast promisc rule for the removed VLAN ID if
3606 * all-multicast is enabled.
3607 */
3608 if (vsi->current_netdev_flags & IFF_ALLMULTI)
3609 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3610 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3611
3612 if (!ice_vsi_has_non_zero_vlans(vsi)) {
3613 /* Update look-up type of multicast promisc rule for VLAN 0
3614 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
3615 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
3616 */
3617 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3618 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3619 ICE_MCAST_VLAN_PROMISC_BITS,
3620 0);
3621 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3622 ICE_MCAST_PROMISC_BITS, 0);
3623 }
3624 }
3625
3626finish:
3627 clear_bit(ICE_CFG_BUSY, vsi->state);
3628
3629 return ret;
d76a60ba
AV
3630}
3631
195bb48f
MS
3632/**
3633 * ice_rep_indr_tc_block_unbind
3634 * @cb_priv: indirection block private data
3635 */
3636static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3637{
3638 struct ice_indr_block_priv *indr_priv = cb_priv;
3639
3640 list_del(&indr_priv->list);
3641 kfree(indr_priv);
3642}
3643
3644/**
3645 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3646 * @vsi: VSI struct which has the netdev
3647 */
3648static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3649{
3650 struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3651
3652 flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3653 ice_rep_indr_tc_block_unbind);
3654}
3655
3656/**
3657 * ice_tc_indir_block_remove - clean indirect TC block notifications
3658 * @pf: PF structure
3659 */
3660static void ice_tc_indir_block_remove(struct ice_pf *pf)
3661{
3662 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
3663
3664 if (!pf_vsi)
3665 return;
3666
3667 ice_tc_indir_block_unregister(pf_vsi);
3668}
3669
3670/**
3671 * ice_tc_indir_block_register - Register TC indirect block notifications
3672 * @vsi: VSI struct which has the netdev
3673 *
3674 * Returns 0 on success, negative value on failure
3675 */
3676static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3677{
3678 struct ice_netdev_priv *np;
3679
3680 if (!vsi || !vsi->netdev)
3681 return -EINVAL;
3682
3683 np = netdev_priv(vsi->netdev);
3684
3685 INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3686 return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3687}
3688
3a858ba3
AV
3689/**
3690 * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3691 * @pf: board private structure
3692 *
3693 * Returns 0 on success, negative value on failure
3694 */
3695static int ice_setup_pf_sw(struct ice_pf *pf)
3696{
195bb48f 3697 struct device *dev = ice_pf_to_dev(pf);
a1ffafb0 3698 bool dvm = ice_is_dvm_ena(&pf->hw);
3a858ba3 3699 struct ice_vsi *vsi;
2ccc1c1c 3700 int status;
3a858ba3 3701
5df7e45d 3702 if (ice_is_reset_in_progress(pf->state))
0f9d5027
AV
3703 return -EBUSY;
3704
a1ffafb0
BC
3705 status = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
3706 if (status)
3707 return -EIO;
3708
0f9d5027 3709 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
135f4b9e
JK
3710 if (!vsi)
3711 return -ENOMEM;
3a858ba3 3712
fbc7b27a
KP
3713 /* init channel list */
3714 INIT_LIST_HEAD(&vsi->ch_list);
3715
df0f8479 3716 status = ice_cfg_netdev(vsi);
c1484691 3717 if (status)
df0f8479 3718 goto unroll_vsi_setup;
efc2214b
MF
3719 /* netdev has to be configured before setting frame size */
3720 ice_vsi_cfg_frame_size(vsi);
df0f8479 3721
195bb48f
MS
3722 /* init indirect block notifications */
3723 status = ice_tc_indir_block_register(vsi);
3724 if (status) {
3725 dev_err(dev, "Failed to register netdev notifier\n");
3726 goto unroll_cfg_netdev;
3727 }
3728
b94b013e
DE
3729 /* Setup DCB netlink interface */
3730 ice_dcbnl_setup(vsi);
3731
df0f8479
AV
3732 /* registering the NAPI handler requires both the queues and
3733 * netdev to be created, which are done in ice_pf_vsi_setup()
3734 * and ice_cfg_netdev() respectively
3735 */
3736 ice_napi_add(vsi);
3737
561f4379 3738 status = ice_init_mac_fltr(pf);
9daf8208 3739 if (status)
d7442f51 3740 goto unroll_napi_add;
9daf8208 3741
2ccc1c1c 3742 return 0;
9daf8208 3743
df0f8479 3744unroll_napi_add:
195bb48f
MS
3745 ice_tc_indir_block_unregister(vsi);
3746unroll_cfg_netdev:
3a858ba3 3747 if (vsi) {
df0f8479 3748 ice_napi_del(vsi);
3a858ba3 3749 if (vsi->netdev) {
a476d72a 3750 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3a858ba3
AV
3751 free_netdev(vsi->netdev);
3752 vsi->netdev = NULL;
3753 }
df0f8479 3754 }
9daf8208 3755
df0f8479 3756unroll_vsi_setup:
135f4b9e 3757 ice_vsi_release(vsi);
3a858ba3
AV
3758 return status;
3759}
3760
940b61af 3761/**
8c243700
AV
3762 * ice_get_avail_q_count - Get count of available queues
3763 * @pf_qmap: bitmap to get available queue count from
3764 * @lock: pointer to a mutex that protects access to pf_qmap
3765 * @size: size of the bitmap
940b61af 3766 */
8c243700
AV
3767static u16
3768ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
940b61af 3769{
88865fc4
KK
3770 unsigned long bit;
3771 u16 count = 0;
940b61af 3772
8c243700
AV
3773 mutex_lock(lock);
3774 for_each_clear_bit(bit, pf_qmap, size)
3775 count++;
3776 mutex_unlock(lock);
940b61af 3777
8c243700
AV
3778 return count;
3779}
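/* Worked example (hypothetical numbers): for a qmap of size 16 in which bits
 * 0-5 are set (those queues are in use), for_each_clear_bit() visits the
 * remaining 10 positions, so 10 is returned as the available queue count.
 */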
d76a60ba 3780
8c243700
AV
3781/**
3782 * ice_get_avail_txq_count - Get count of available Tx queues
3783 * @pf: pointer to an ice_pf instance
3784 */
3785u16 ice_get_avail_txq_count(struct ice_pf *pf)
3786{
3787 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3788 pf->max_pf_txqs);
3789}
940b61af 3790
8c243700
AV
3791/**
3792 * ice_get_avail_rxq_count - Get count of available Rx queues
3793 * @pf: pointer to an ice_pf instance
3794 */
3795u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3796{
3797 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3798 pf->max_pf_rxqs);
940b61af
AV
3799}
3800
3801/**
3802 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3803 * @pf: board private structure to initialize
3804 */
3805static void ice_deinit_pf(struct ice_pf *pf)
3806{
8d81fa55 3807 ice_service_task_stop(pf);
486b9eee 3808 mutex_destroy(&pf->adev_mutex);
940b61af 3809 mutex_destroy(&pf->sw_mutex);
b94b013e 3810 mutex_destroy(&pf->tc_mutex);
940b61af 3811 mutex_destroy(&pf->avail_q_mutex);
3d5985a1 3812 mutex_destroy(&pf->vfs.table_lock);
78b5713a
AV
3813
3814 if (pf->avail_txqs) {
3815 bitmap_free(pf->avail_txqs);
3816 pf->avail_txqs = NULL;
3817 }
3818
3819 if (pf->avail_rxqs) {
3820 bitmap_free(pf->avail_rxqs);
3821 pf->avail_rxqs = NULL;
3822 }
06c16d89
JK
3823
3824 if (pf->ptp.clock)
3825 ptp_clock_unregister(pf->ptp.clock);
940b61af
AV
3826}
3827
3828/**
462acf6a
TN
3829 * ice_set_pf_caps - set the PF's capability flags
3830 * @pf: pointer to the PF instance
940b61af 3831 */
462acf6a 3832static void ice_set_pf_caps(struct ice_pf *pf)
940b61af 3833{
462acf6a
TN
3834 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3835
d25a0fc4 3836 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
88f62aea 3837 if (func_caps->common_cap.rdma)
d25a0fc4 3838 set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
462acf6a
TN
3839 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3840 if (func_caps->common_cap.dcb)
80739b57 3841 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
462acf6a
TN
3842 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3843 if (func_caps->common_cap.sr_iov_1_1) {
75d2b253 3844 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
000773c0 3845 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
dc36796e 3846 ICE_MAX_SRIOV_VFS);
75d2b253 3847 }
462acf6a
TN
3848 clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3849 if (func_caps->common_cap.rss_table_size)
3850 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
940b61af 3851
148beb61
HT
3852 clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3853 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3854 u16 unused;
3855
3856 /* ctrl_vsi_idx will be set to a valid value when flow director
3857 * is setup by ice_init_fdir
3858 */
3859 pf->ctrl_vsi_idx = ICE_NO_VSI;
3860 set_bit(ICE_FLAG_FD_ENA, pf->flags);
3861 /* force guaranteed filter pool for PF */
3862 ice_alloc_fd_guar_item(&pf->hw, &unused,
3863 func_caps->fd_fltr_guar);
3864 /* force shared filter pool for PF */
3865 ice_alloc_fd_shrd_item(&pf->hw, &unused,
3866 func_caps->fd_fltr_best_effort);
3867 }
3868
06c16d89
JK
3869 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3870 if (func_caps->common_cap.ieee_1588)
3871 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3872
462acf6a
TN
3873 pf->max_pf_txqs = func_caps->common_cap.num_txq;
3874 pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3875}
940b61af 3876
462acf6a
TN
3877/**
3878 * ice_init_pf - Initialize general software structures (struct ice_pf)
3879 * @pf: board private structure to initialize
3880 */
3881static int ice_init_pf(struct ice_pf *pf)
3882{
3883 ice_set_pf_caps(pf);
3884
3885 mutex_init(&pf->sw_mutex);
b94b013e 3886 mutex_init(&pf->tc_mutex);
486b9eee 3887 mutex_init(&pf->adev_mutex);
d76a60ba 3888
d69ea414
JK
3889 INIT_HLIST_HEAD(&pf->aq_wait_list);
3890 spin_lock_init(&pf->aq_wait_lock);
3891 init_waitqueue_head(&pf->aq_wait_queue);
3892
1c08052e
JK
3893 init_waitqueue_head(&pf->reset_wait_queue);
3894
940b61af
AV
3895 /* setup service timer and periodic service task */
3896 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3897 pf->serv_tmr_period = HZ;
3898 INIT_WORK(&pf->serv_task, ice_service_task);
7e408e07 3899 clear_bit(ICE_SERVICE_SCHED, pf->state);
78b5713a 3900
462acf6a 3901 mutex_init(&pf->avail_q_mutex);
78b5713a
AV
3902 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3903 if (!pf->avail_txqs)
3904 return -ENOMEM;
3905
3906 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3907 if (!pf->avail_rxqs) {
4015d11e 3908 devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
78b5713a
AV
3909 pf->avail_txqs = NULL;
3910 return -ENOMEM;
3911 }
3912
3d5985a1
JK
3913 mutex_init(&pf->vfs.table_lock);
3914 hash_init(pf->vfs.table);
3915
78b5713a 3916 return 0;
940b61af
AV
3917}
3918
3919/**
3920 * ice_ena_msix_range - Request a range of MSIX vectors from the OS
3921 * @pf: board private structure
3922 *
3923 * compute the number of MSIX vectors required (v_budget) and request from
3924 * the OS. Return the number of vectors reserved or negative on failure
3925 */
3926static int ice_ena_msix_range(struct ice_pf *pf)
3927{
d25a0fc4 3928 int num_cpus, v_left, v_actual, v_other, v_budget = 0;
4015d11e 3929 struct device *dev = ice_pf_to_dev(pf);
940b61af
AV
3930 int needed, err, i;
3931
3932 v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
d25a0fc4 3933 num_cpus = num_online_cpus();
940b61af 3934
741106f7
TN
3935 /* reserve for LAN miscellaneous handler */
3936 needed = ICE_MIN_LAN_OICR_MSIX;
152b978a
AV
3937 if (v_left < needed)
3938 goto no_hw_vecs_left_err;
940b61af
AV
3939 v_budget += needed;
3940 v_left -= needed;
3941
741106f7 3942 /* reserve for flow director */
148beb61
HT
3943 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
3944 needed = ICE_FDIR_MSIX;
3945 if (v_left < needed)
3946 goto no_hw_vecs_left_err;
3947 v_budget += needed;
3948 v_left -= needed;
3949 }
3950
f66756e0
GN
3951 /* reserve for switchdev */
3952 needed = ICE_ESWITCH_MSIX;
3953 if (v_left < needed)
3954 goto no_hw_vecs_left_err;
3955 v_budget += needed;
3956 v_left -= needed;
3957
741106f7
TN
3958 /* total used for non-traffic vectors */
3959 v_other = v_budget;
3960
3961 /* reserve vectors for LAN traffic */
d25a0fc4 3962 needed = num_cpus;
741106f7
TN
3963 if (v_left < needed)
3964 goto no_hw_vecs_left_err;
3965 pf->num_lan_msix = needed;
3966 v_budget += needed;
3967 v_left -= needed;
3968
d25a0fc4 3969 /* reserve vectors for RDMA auxiliary driver */
88f62aea 3970 if (ice_is_rdma_ena(pf)) {
d25a0fc4
DE
3971 needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
3972 if (v_left < needed)
3973 goto no_hw_vecs_left_err;
3974 pf->num_rdma_msix = needed;
3975 v_budget += needed;
3976 v_left -= needed;
3977 }
3978
4015d11e 3979 pf->msix_entries = devm_kcalloc(dev, v_budget,
c6dfd690 3980 sizeof(*pf->msix_entries), GFP_KERNEL);
940b61af
AV
3981 if (!pf->msix_entries) {
3982 err = -ENOMEM;
3983 goto exit_err;
3984 }
3985
3986 for (i = 0; i < v_budget; i++)
3987 pf->msix_entries[i].entry = i;
3988
3989 /* actually reserve the vectors */
3990 v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3991 ICE_MIN_MSIX, v_budget);
940b61af 3992 if (v_actual < 0) {
4015d11e 3993 dev_err(dev, "unable to reserve MSI-X vectors\n");
940b61af
AV
3994 err = v_actual;
3995 goto msix_err;
3996 }
3997
3998 if (v_actual < v_budget) {
19cce2c6 3999 dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
940b61af 4000 v_budget, v_actual);
152b978a 4001
f3fe97f6 4002 if (v_actual < ICE_MIN_MSIX) {
152b978a 4003 /* error if we can't get minimum vectors */
940b61af
AV
4004 pci_disable_msix(pf->pdev);
4005 err = -ERANGE;
4006 goto msix_err;
152b978a 4007 } else {
d25a0fc4
DE
4008 int v_remain = v_actual - v_other;
4009 int v_rdma = 0, v_min_rdma = 0;
4010
88f62aea 4011 if (ice_is_rdma_ena(pf)) {
d25a0fc4
DE
4012 /* Need at least 1 interrupt in addition to
4013 * AEQ MSIX
4014 */
4015 v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
4016 v_min_rdma = ICE_MIN_RDMA_MSIX;
4017 }
741106f7
TN
4018
4019 if (v_actual == ICE_MIN_MSIX ||
d25a0fc4
DE
4020 v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
4021 dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
4022 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
4023
4024 pf->num_rdma_msix = 0;
741106f7 4025 pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
d25a0fc4
DE
4026 } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
4027 (v_remain - v_rdma < v_rdma)) {
4028 /* Support minimum RDMA and give remaining
4029 * vectors to LAN MSIX
4030 */
4031 pf->num_rdma_msix = v_min_rdma;
4032 pf->num_lan_msix = v_remain - v_min_rdma;
4033 } else {
4034 /* Split remaining MSIX with RDMA after
4035 * accounting for AEQ MSIX
4036 */
4037 pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
4038 ICE_RDMA_NUM_AEQ_MSIX;
4039 pf->num_lan_msix = v_remain - pf->num_rdma_msix;
4040 }
741106f7
TN
4041
4042 dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
4043 pf->num_lan_msix);
d25a0fc4 4044
88f62aea 4045 if (ice_is_rdma_ena(pf))
d25a0fc4
DE
4046 dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
4047 pf->num_rdma_msix);
940b61af
AV
4048 }
4049 }
4050
4051 return v_actual;
4052
4053msix_err:
4015d11e 4054 devm_kfree(dev, pf->msix_entries);
940b61af
AV
4055 goto exit_err;
4056
152b978a 4057no_hw_vecs_left_err:
19cce2c6 4058 dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
152b978a
AV
4059 needed, v_left);
4060 err = -ERANGE;
940b61af 4061exit_err:
d25a0fc4 4062 pf->num_rdma_msix = 0;
940b61af 4063 pf->num_lan_msix = 0;
940b61af
AV
4064 return err;
4065}
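/* Worked example of the shortfall split in the final else branch above,
 * assuming for illustration that ICE_RDMA_NUM_AEQ_MSIX is 4 (the real value
 * comes from the driver headers): with v_other = 4 and v_actual = 24,
 * v_remain = 20 and
 *   num_rdma_msix = (20 - 4) / 2 + 4 = 12
 *   num_lan_msix  = 20 - 12         = 8
 * so RDMA keeps its AEQ vectors plus half of the rest and LAN gets the
 * remainder.
 */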
4066
4067/**
4068 * ice_dis_msix - Disable MSI-X interrupt setup in OS
4069 * @pf: board private structure
4070 */
4071static void ice_dis_msix(struct ice_pf *pf)
4072{
4073 pci_disable_msix(pf->pdev);
4015d11e 4074 devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
940b61af 4075 pf->msix_entries = NULL;
940b61af
AV
4076}
4077
eb0208ec
PB
4078/**
4079 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
4080 * @pf: board private structure
4081 */
4082static void ice_clear_interrupt_scheme(struct ice_pf *pf)
4083{
ba880734 4084 ice_dis_msix(pf);
eb0208ec 4085
cbe66bfe 4086 if (pf->irq_tracker) {
4015d11e 4087 devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
cbe66bfe 4088 pf->irq_tracker = NULL;
eb0208ec
PB
4089 }
4090}
4091
940b61af
AV
4092/**
4093 * ice_init_interrupt_scheme - Determine proper interrupt scheme
4094 * @pf: board private structure to initialize
4095 */
4096static int ice_init_interrupt_scheme(struct ice_pf *pf)
4097{
cbe66bfe 4098 int vectors;
940b61af 4099
ba880734 4100 vectors = ice_ena_msix_range(pf);
940b61af
AV
4101
4102 if (vectors < 0)
4103 return vectors;
4104
4105 /* set up vector assignment tracking */
e94c0df9
GS
4106 pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
4107 struct_size(pf->irq_tracker, list, vectors),
4108 GFP_KERNEL);
cbe66bfe 4109 if (!pf->irq_tracker) {
940b61af
AV
4110 ice_dis_msix(pf);
4111 return -ENOMEM;
4112 }
4113
eb0208ec 4114 /* populate SW interrupts pool with number of OS granted IRQs. */
88865fc4
KK
4115 pf->num_avail_sw_msix = (u16)vectors;
4116 pf->irq_tracker->num_entries = (u16)vectors;
cbe66bfe 4117 pf->irq_tracker->end = pf->irq_tracker->num_entries;
eb0208ec
PB
4118
4119 return 0;
940b61af
AV
4120}
4121
769c500d 4122/**
31765519
AV
4123 * ice_is_wol_supported - check if WoL is supported
4124 * @hw: pointer to hardware info
769c500d
AA
4125 *
4126 * Check if WoL is supported based on the HW configuration.
4127 * Returns true if NVM supports and enables WoL for this port, false otherwise
4128 */
31765519 4129bool ice_is_wol_supported(struct ice_hw *hw)
769c500d 4130{
769c500d
AA
4131 u16 wol_ctrl;
4132
4133 /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
4134 * word) indicates WoL is not supported on the corresponding PF ID.
4135 */
4136 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
4137 return false;
4138
31765519 4139 return !(BIT(hw->port_info->lport) & wol_ctrl);
769c500d
AA
4140}
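/* Illustrative reading of the check above (values are made up): if the NVM
 * word wol_ctrl reads 0x0004 and this port's lport is 2, BIT(2) is set in
 * wol_ctrl, the function returns false, and WoL is reported as unsupported
 * for this port.
 */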
4141
87324e74
HT
4142/**
4143 * ice_vsi_recfg_qs - Change the number of queues on a VSI
4144 * @vsi: VSI being changed
4145 * @new_rx: new number of Rx queues
4146 * @new_tx: new number of Tx queues
4147 *
4148 * Only change the number of queues if new_tx or new_rx is non-zero.
4149 *
4150 * Returns 0 on success.
4151 */
4152int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
4153{
4154 struct ice_pf *pf = vsi->back;
4155 int err = 0, timeout = 50;
4156
4157 if (!new_rx && !new_tx)
4158 return -EINVAL;
4159
7e408e07 4160 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
87324e74
HT
4161 timeout--;
4162 if (!timeout)
4163 return -EBUSY;
4164 usleep_range(1000, 2000);
4165 }
4166
4167 if (new_tx)
88865fc4 4168 vsi->req_txq = (u16)new_tx;
87324e74 4169 if (new_rx)
88865fc4 4170 vsi->req_rxq = (u16)new_rx;
87324e74
HT
4171
4172 /* set for the next time the netdev is started */
4173 if (!netif_running(vsi->netdev)) {
4174 ice_vsi_rebuild(vsi, false);
4175 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
4176 goto done;
4177 }
4178
4179 ice_vsi_close(vsi);
4180 ice_vsi_rebuild(vsi, false);
4181 ice_pf_dcb_recfg(pf);
4182 ice_vsi_open(vsi);
4183done:
7e408e07 4184 clear_bit(ICE_CFG_BUSY, pf->state);
87324e74
HT
4185 return err;
4186}
4187
cd1f56f4
BC
4188/**
4189 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4190 * @pf: PF to configure
4191 *
4192 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
4193 * VSI can still Tx/Rx VLAN tagged packets.
4194 */
4195static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4196{
4197 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4198 struct ice_vsi_ctx *ctxt;
cd1f56f4 4199 struct ice_hw *hw;
5518ac2a 4200 int status;
cd1f56f4
BC
4201
4202 if (!vsi)
4203 return;
4204
4205 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4206 if (!ctxt)
4207 return;
4208
4209 hw = &pf->hw;
4210 ctxt->info = vsi->info;
4211
4212 ctxt->info.valid_sections =
4213 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4214 ICE_AQ_VSI_PROP_SECURITY_VALID |
4215 ICE_AQ_VSI_PROP_SW_VALID);
4216
4217 /* disable VLAN anti-spoof */
4218 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4219 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4220
4221 /* disable VLAN pruning and keep all other settings */
4222 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4223
4224 /* allow all VLANs on Tx and don't strip on Rx */
7bd527aa
BC
4225 ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
4226 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
cd1f56f4
BC
4227
4228 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4229 if (status) {
5f87ec48 4230 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
5518ac2a 4231 status, ice_aq_str(hw->adminq.sq_last_status));
cd1f56f4
BC
4232 } else {
4233 vsi->info.sec_flags = ctxt->info.sec_flags;
4234 vsi->info.sw_flags2 = ctxt->info.sw_flags2;
7bd527aa 4235 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
cd1f56f4
BC
4236 }
4237
4238 kfree(ctxt);
4239}
4240
462acf6a
TN
4241/**
4242 * ice_log_pkg_init - log result of DDP package load
4243 * @hw: pointer to hardware info
247dd97d 4244 * @state: state of package load
462acf6a 4245 */
247dd97d 4246static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
462acf6a 4247{
247dd97d
WD
4248 struct ice_pf *pf = hw->back;
4249 struct device *dev;
462acf6a 4250
247dd97d
WD
4251 dev = ice_pf_to_dev(pf);
4252
4253 switch (state) {
4254 case ICE_DDP_PKG_SUCCESS:
4255 dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4256 hw->active_pkg_name,
4257 hw->active_pkg_ver.major,
4258 hw->active_pkg_ver.minor,
4259 hw->active_pkg_ver.update,
4260 hw->active_pkg_ver.draft);
4261 break;
4262 case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4263 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4264 hw->active_pkg_name,
4265 hw->active_pkg_ver.major,
4266 hw->active_pkg_ver.minor,
4267 hw->active_pkg_ver.update,
4268 hw->active_pkg_ver.draft);
4269 break;
4270 case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4271 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
4272 hw->active_pkg_name,
4273 hw->active_pkg_ver.major,
4274 hw->active_pkg_ver.minor,
4275 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4276 break;
4277 case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4278 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4279 hw->active_pkg_name,
4280 hw->active_pkg_ver.major,
4281 hw->active_pkg_ver.minor,
4282 hw->active_pkg_ver.update,
4283 hw->active_pkg_ver.draft,
4284 hw->pkg_name,
4285 hw->pkg_ver.major,
4286 hw->pkg_ver.minor,
4287 hw->pkg_ver.update,
4288 hw->pkg_ver.draft);
462acf6a 4289 break;
247dd97d 4290 case ICE_DDP_PKG_FW_MISMATCH:
b8272919
VR
4291 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
4292 break;
247dd97d 4293 case ICE_DDP_PKG_INVALID_FILE:
19cce2c6 4294 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
462acf6a 4295 break;
247dd97d
WD
4296 case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4297 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
462acf6a 4298 break;
247dd97d
WD
4299 case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4300 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
4301 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4302 break;
4303 case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4304 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
4305 break;
4306 case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4307 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
4308 break;
4309 case ICE_DDP_PKG_LOAD_ERROR:
4310 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
0092db5f
JB
4311 /* poll for reset to complete */
4312 if (ice_check_reset(hw))
4313 dev_err(dev, "Error resetting device. Please reload the driver\n");
462acf6a 4314 break;
247dd97d
WD
4315 case ICE_DDP_PKG_ERR:
4316 default:
4317 dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
0092db5f 4318 break;
462acf6a
TN
4319 }
4320}
4321
4322/**
4323 * ice_load_pkg - load/reload the DDP Package file
4324 * @firmware: firmware structure when firmware requested or NULL for reload
4325 * @pf: pointer to the PF instance
4326 *
4327 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4328 * initialize HW tables.
4329 */
4330static void
4331ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4332{
247dd97d 4333 enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4015d11e 4334 struct device *dev = ice_pf_to_dev(pf);
462acf6a
TN
4335 struct ice_hw *hw = &pf->hw;
4336
4337 /* Load DDP Package */
4338 if (firmware && !hw->pkg_copy) {
247dd97d
WD
4339 state = ice_copy_and_init_pkg(hw, firmware->data,
4340 firmware->size);
4341 ice_log_pkg_init(hw, state);
462acf6a
TN
4342 } else if (!firmware && hw->pkg_copy) {
4343 /* Reload package during rebuild after CORER/GLOBR reset */
247dd97d
WD
4344 state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4345 ice_log_pkg_init(hw, state);
462acf6a 4346 } else {
19cce2c6 4347 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
462acf6a
TN
4348 }
4349
247dd97d 4350 if (!ice_is_init_pkg_successful(state)) {
462acf6a
TN
4351 /* Safe Mode */
4352 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4353 return;
4354 }
4355
4356 /* Successful download package is the precondition for advanced
4357 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
4358 */
4359 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4360}
4361
c585ea42
BC
4362/**
4363 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4364 * @pf: pointer to the PF structure
4365 *
4366 * There is no error returned here because the driver should be able to handle
4367 * 128 Byte cache lines, so we only print a warning in case issues are seen,
4368 * specifically with Tx.
4369 */
4370static void ice_verify_cacheline_size(struct ice_pf *pf)
4371{
4372 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
19cce2c6 4373 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
c585ea42
BC
4374 ICE_CACHE_LINE_BYTES);
4375}
4376
e3710a01
PSJ
4377/**
4378 * ice_send_version - update firmware with driver version
4379 * @pf: PF struct
4380 *
d54699e2 4381 * Returns 0 on success, else error code
e3710a01 4382 */
5e24d598 4383static int ice_send_version(struct ice_pf *pf)
e3710a01
PSJ
4384{
4385 struct ice_driver_ver dv;
4386
34a2a3b8
JK
4387 dv.major_ver = 0xff;
4388 dv.minor_ver = 0xff;
4389 dv.build_ver = 0xff;
e3710a01 4390 dv.subbuild_ver = 0;
34a2a3b8 4391 strscpy((char *)dv.driver_string, UTS_RELEASE,
e3710a01
PSJ
4392 sizeof(dv.driver_string));
4393 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4394}
4395
148beb61
HT
4396/**
4397 * ice_init_fdir - Initialize flow director VSI and configuration
4398 * @pf: pointer to the PF instance
4399 *
4400 * returns 0 on success, negative on error
4401 */
4402static int ice_init_fdir(struct ice_pf *pf)
4403{
4404 struct device *dev = ice_pf_to_dev(pf);
4405 struct ice_vsi *ctrl_vsi;
4406 int err;
4407
4408 /* Side Band Flow Director needs to have a control VSI.
4409 * Allocate it and store it in the PF.
4410 */
4411 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4412 if (!ctrl_vsi) {
4413 dev_dbg(dev, "could not create control VSI\n");
4414 return -ENOMEM;
4415 }
4416
4417 err = ice_vsi_open_ctrl(ctrl_vsi);
4418 if (err) {
4419 dev_dbg(dev, "could not open control VSI\n");
4420 goto err_vsi_open;
4421 }
4422
4423 mutex_init(&pf->hw.fdir_fltr_lock);
4424
4425 err = ice_fdir_create_dflt_rules(pf);
4426 if (err)
4427 goto err_fdir_rule;
4428
4429 return 0;
4430
4431err_fdir_rule:
4432 ice_fdir_release_flows(&pf->hw);
4433 ice_vsi_close(ctrl_vsi);
4434err_vsi_open:
4435 ice_vsi_release(ctrl_vsi);
4436 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4437 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4438 pf->ctrl_vsi_idx = ICE_NO_VSI;
4439 }
4440 return err;
4441}
4442
462acf6a
TN
4443/**
4444 * ice_get_opt_fw_name - return optional firmware file name or NULL
4445 * @pf: pointer to the PF instance
4446 */
4447static char *ice_get_opt_fw_name(struct ice_pf *pf)
4448{
4449 /* Optional firmware name is the same as the default, with an additional dash
4450 * followed by an EUI-64 identifier (PCIe Device Serial Number)
4451 */
4452 struct pci_dev *pdev = pf->pdev;
ceb2f007
JK
4453 char *opt_fw_filename;
4454 u64 dsn;
462acf6a
TN
4455
4456 /* Determine the name of the optional file using the DSN (two
4457 * dwords following the start of the DSN Capability).
4458 */
ceb2f007
JK
4459 dsn = pci_get_dsn(pdev);
4460 if (!dsn)
4461 return NULL;
4462
4463 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4464 if (!opt_fw_filename)
4465 return NULL;
4466
1a9c561a 4467 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
ceb2f007 4468 ICE_DDP_PKG_PATH, dsn);
462acf6a
TN
4469
4470 return opt_fw_filename;
4471}
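/* Example of the name built above (the DSN value is made up): a device whose
 * PCIe DSN reads 0x0123456789abcdef would look for the optional package
 * ICE_DDP_PKG_PATH "ice-0123456789abcdef.pkg" before ice_request_fw() falls
 * back to the default ICE_DDP_PKG_FILE.
 */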
4472
4473/**
4474 * ice_request_fw - Device initialization routine
4475 * @pf: pointer to the PF instance
4476 */
4477static void ice_request_fw(struct ice_pf *pf)
4478{
4479 char *opt_fw_filename = ice_get_opt_fw_name(pf);
4480 const struct firmware *firmware = NULL;
4015d11e 4481 struct device *dev = ice_pf_to_dev(pf);
462acf6a
TN
4482 int err = 0;
4483
4484 /* optional device-specific DDP (if present) overrides the default DDP
4485 * package file. The kernel logs a debug message if the file doesn't exist,
4486 * and warning messages for other errors.
4487 */
4488 if (opt_fw_filename) {
4489 err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4490 if (err) {
4491 kfree(opt_fw_filename);
4492 goto dflt_pkg_load;
4493 }
4494
4495 /* request for firmware was successful. Download to device */
4496 ice_load_pkg(firmware, pf);
4497 kfree(opt_fw_filename);
4498 release_firmware(firmware);
4499 return;
4500 }
4501
4502dflt_pkg_load:
4503 err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4504 if (err) {
19cce2c6 4505 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
462acf6a
TN
4506 return;
4507 }
4508
4509 /* request for firmware was successful. Download to device */
4510 ice_load_pkg(firmware, pf);
4511 release_firmware(firmware);
4512}
4513
769c500d
AA
4514/**
4515 * ice_print_wake_reason - show the wake up cause in the log
4516 * @pf: pointer to the PF struct
4517 */
4518static void ice_print_wake_reason(struct ice_pf *pf)
4519{
4520 u32 wus = pf->wakeup_reason;
4521 const char *wake_str;
4522
4523 /* if no wake event, nothing to print */
4524 if (!wus)
4525 return;
4526
4527 if (wus & PFPM_WUS_LNKC_M)
4528 wake_str = "Link\n";
4529 else if (wus & PFPM_WUS_MAG_M)
4530 wake_str = "Magic Packet\n";
4531 else if (wus & PFPM_WUS_MNG_M)
4532 wake_str = "Management\n";
4533 else if (wus & PFPM_WUS_FW_RST_WK_M)
4534 wake_str = "Firmware Reset\n";
4535 else
4536 wake_str = "Unknown\n";
4537
4538 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4539}
4540
1e23f076
AV
4541/**
4542 * ice_register_netdev - register netdev and devlink port
4543 * @pf: pointer to the PF struct
4544 */
4545static int ice_register_netdev(struct ice_pf *pf)
4546{
4547 struct ice_vsi *vsi;
4548 int err = 0;
4549
4550 vsi = ice_get_main_vsi(pf);
4551 if (!vsi || !vsi->netdev)
4552 return -EIO;
4553
4554 err = register_netdev(vsi->netdev);
4555 if (err)
4556 goto err_register_netdev;
4557
a476d72a 4558 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
1e23f076
AV
4559 netif_carrier_off(vsi->netdev);
4560 netif_tx_stop_all_queues(vsi->netdev);
2ae0aa47 4561 err = ice_devlink_create_pf_port(pf);
1e23f076
AV
4562 if (err)
4563 goto err_devlink_create;
4564
2ae0aa47 4565 devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
1e23f076
AV
4566
4567 return 0;
4568err_devlink_create:
4569 unregister_netdev(vsi->netdev);
a476d72a 4570 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
1e23f076
AV
4571err_register_netdev:
4572 free_netdev(vsi->netdev);
4573 vsi->netdev = NULL;
a476d72a 4574 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
1e23f076
AV
4575 return err;
4576}
4577
837f08fd
AV
4578/**
4579 * ice_probe - Device initialization routine
4580 * @pdev: PCI device information struct
4581 * @ent: entry in ice_pci_tbl
4582 *
4583 * Returns 0 on success, negative on failure
4584 */
c8b7abdd
BA
4585static int
4586ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
837f08fd 4587{
77ed84f4 4588 struct device *dev = &pdev->dev;
837f08fd
AV
4589 struct ice_pf *pf;
4590 struct ice_hw *hw;
b20e6c17 4591 int i, err;
837f08fd 4592
50ac7479
AV
4593 if (pdev->is_virtfn) {
4594 dev_err(dev, "can't probe a virtual function\n");
4595 return -EINVAL;
4596 }
4597
4ee656bb
TN
4598 /* this driver uses devres, see
4599 * Documentation/driver-api/driver-model/devres.rst
4600 */
837f08fd
AV
4601 err = pcim_enable_device(pdev);
4602 if (err)
4603 return err;
4604
80ad6dde 4605 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
837f08fd 4606 if (err) {
77ed84f4 4607 dev_err(dev, "BAR0 I/O map error %d\n", err);
837f08fd
AV
4608 return err;
4609 }
4610
1adf7ead 4611 pf = ice_allocate_pf(dev);
837f08fd
AV
4612 if (!pf)
4613 return -ENOMEM;
4614
73e30a62
DE
4615 /* initialize Auxiliary index to invalid value */
4616 pf->aux_idx = -1;
4617
2f2da36e 4618 /* set up for high or low DMA */
77ed84f4 4619 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
837f08fd 4620 if (err) {
77ed84f4 4621 dev_err(dev, "DMA configuration failed: 0x%x\n", err);
837f08fd
AV
4622 return err;
4623 }
4624
4625 pci_enable_pcie_error_reporting(pdev);
4626 pci_set_master(pdev);
4627
4628 pf->pdev = pdev;
4629 pci_set_drvdata(pdev, pf);
7e408e07 4630 set_bit(ICE_DOWN, pf->state);
8d81fa55 4631 /* Disable service task until DOWN bit is cleared */
7e408e07 4632 set_bit(ICE_SERVICE_DIS, pf->state);
837f08fd
AV
4633
4634 hw = &pf->hw;
4635 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4e56802e
MS
4636 pci_save_state(pdev);
4637
837f08fd
AV
4638 hw->back = pf;
4639 hw->vendor_id = pdev->vendor;
4640 hw->device_id = pdev->device;
4641 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4642 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4643 hw->subsystem_device_id = pdev->subsystem_device;
4644 hw->bus.device = PCI_SLOT(pdev->devfn);
4645 hw->bus.func = PCI_FUNC(pdev->devfn);
f31e4b6f
AV
4646 ice_set_ctrlq_len(hw);
4647
837f08fd
AV
4648 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4649
7ec59eea
AV
4650#ifndef CONFIG_DYNAMIC_DEBUG
4651 if (debug < -1)
4652 hw->debug_mask = debug;
4653#endif
4654
f31e4b6f
AV
4655 err = ice_init_hw(hw);
4656 if (err) {
77ed84f4 4657 dev_err(dev, "ice_init_hw failed: %d\n", err);
f31e4b6f
AV
4658 err = -EIO;
4659 goto err_exit_unroll;
4660 }
4661
40b24760
AV
4662 ice_init_feature_support(pf);
4663
462acf6a
TN
4664 ice_request_fw(pf);
4665
4666 /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
4667 * set in pf->flags, which will cause ice_is_safe_mode to return
4668 * true
4669 */
4670 if (ice_is_safe_mode(pf)) {
462acf6a
TN
4671 /* we already got function/device capabilities but these don't
4672 * reflect what the driver needs to do in safe mode. Instead of
4673 * adding conditional logic everywhere to ignore these
4674 * device/function capabilities, override them.
4675 */
4676 ice_set_safe_mode_caps(hw);
4677 }
4678
5c8e3c7f
AV
4679 hw->ucast_shared = true;
4680
78b5713a
AV
4681 err = ice_init_pf(pf);
4682 if (err) {
4683 dev_err(dev, "ice_init_pf failed: %d\n", err);
4684 goto err_init_pf_unroll;
4685 }
940b61af 4686
dce730f1
JK
4687 ice_devlink_init_regions(pf);
4688
b20e6c17
JK
4689 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4690 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4691 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4692 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4693 i = 0;
4694 if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4695 pf->hw.udp_tunnel_nic.tables[i].n_entries =
4696 pf->hw.tnl.valid_count[TNL_VXLAN];
4697 pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4698 UDP_TUNNEL_TYPE_VXLAN;
4699 i++;
4700 }
4701 if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4702 pf->hw.udp_tunnel_nic.tables[i].n_entries =
4703 pf->hw.tnl.valid_count[TNL_GENEVE];
4704 pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4705 UDP_TUNNEL_TYPE_GENEVE;
4706 i++;
4707 }
4708
995c90f2 4709 pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
940b61af
AV
4710 if (!pf->num_alloc_vsi) {
4711 err = -EIO;
4712 goto err_init_pf_unroll;
4713 }
b20e6c17
JK
4714 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4715 dev_warn(&pf->pdev->dev,
4716 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4717 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4718 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4719 }
940b61af 4720
77ed84f4
BA
4721 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4722 GFP_KERNEL);
940b61af
AV
4723 if (!pf->vsi) {
4724 err = -ENOMEM;
4725 goto err_init_pf_unroll;
4726 }
4727
4728 err = ice_init_interrupt_scheme(pf);
4729 if (err) {
77ed84f4 4730 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
940b61af 4731 err = -EIO;
bc3a0241 4732 goto err_init_vsi_unroll;
940b61af
AV
4733 }
4734
4735 /* In case of MSI-X we are going to set up the misc vector right here
4736 * to handle admin queue events etc. In case of legacy and MSI,
4737 * the misc functionality and queue processing are combined in
4738 * the same vector and that gets set up at open.
4739 */
ba880734
BC
4740 err = ice_req_irq_msix_misc(pf);
4741 if (err) {
4742 dev_err(dev, "setup of misc vector failed: %d\n", err);
4743 goto err_init_interrupt_unroll;
940b61af
AV
4744 }
4745
4746 /* create switch struct for the switch element created by FW on boot */
77ed84f4 4747 pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
940b61af
AV
4748 if (!pf->first_sw) {
4749 err = -ENOMEM;
4750 goto err_msix_misc_unroll;
4751 }
4752
b1edc14a
MFIP
4753 if (hw->evb_veb)
4754 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4755 else
4756 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4757
940b61af
AV
4758 pf->first_sw->pf = pf;
4759
4760 /* record the sw_id available for later use */
4761 pf->first_sw->sw_id = hw->port_info->sw_id;
4762
3a858ba3
AV
4763 err = ice_setup_pf_sw(pf);
4764 if (err) {
4015d11e 4765 dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
3a858ba3
AV
4766 goto err_alloc_sw_unroll;
4767 }
9daf8208 4768
7e408e07 4769 clear_bit(ICE_SERVICE_DIS, pf->state);
9daf8208 4770
e3710a01
PSJ
4771 /* tell the firmware we are up */
4772 err = ice_send_version(pf);
4773 if (err) {
19cce2c6 4774 dev_err(dev, "probe failed sending driver version %s. error: %d\n",
34a2a3b8 4775 UTS_RELEASE, err);
78116e97 4776 goto err_send_version_unroll;
e3710a01
PSJ
4777 }
4778
9daf8208
AV
4779 /* since everything is good, start the service timer */
4780 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4781
250c3b3e
BC
4782 err = ice_init_link_events(pf->hw.port_info);
4783 if (err) {
4784 dev_err(dev, "ice_init_link_events failed: %d\n", err);
78116e97 4785 goto err_send_version_unroll;
250c3b3e
BC
4786 }
4787
08771bce 4788 /* not a fatal error if this fails */
1a3571b5 4789 err = ice_init_nvm_phy_type(pf->hw.port_info);
08771bce 4790 if (err)
1a3571b5 4791 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
1a3571b5 4792
08771bce 4793 /* not a fatal error if this fails */
1a3571b5 4794 err = ice_update_link_info(pf->hw.port_info);
08771bce 4795 if (err)
1a3571b5 4796 dev_err(dev, "ice_update_link_info failed: %d\n", err);
1a3571b5 4797
ea78ce4d
PG
4798 ice_init_link_dflt_override(pf->hw.port_info);
4799
99d40752
BC
4800 ice_check_link_cfg_err(pf,
4801 pf->hw.port_info->phy.link_info.link_cfg_err);
c77849f5 4802
1a3571b5
PG
4803 /* if media available, initialize PHY settings */
4804 if (pf->hw.port_info->phy.link_info.link_info &
4805 ICE_AQ_MEDIA_AVAILABLE) {
08771bce 4806 /* not a fatal error if this fails */
1a3571b5 4807 err = ice_init_phy_user_cfg(pf->hw.port_info);
08771bce 4808 if (err)
1a3571b5 4809 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
1a3571b5
PG
4810
4811 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4812 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4813
4814 if (vsi)
4815 ice_configure_phy(vsi);
4816 }
4817 } else {
4818 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4819 }
4820
c585ea42
BC
4821 ice_verify_cacheline_size(pf);
4822
769c500d
AA
4823 /* Save wakeup reason register for later use */
4824 pf->wakeup_reason = rd32(hw, PFPM_WUS);
4825
4826 /* check for a power management event */
4827 ice_print_wake_reason(pf);
4828
4829 /* clear wake status, all bits */
4830 wr32(hw, PFPM_WUS, U32_MAX);
4831
4832 /* Disable WoL at init, wait for user to enable */
4833 device_set_wakeup_enable(dev, false);
4834
cd1f56f4
BC
4835 if (ice_is_safe_mode(pf)) {
4836 ice_set_safe_mode_vlan_cfg(pf);
de75135b 4837 goto probe_done;
cd1f56f4 4838 }
462acf6a
TN
4839
4840 /* initialize DDP driven features */
06c16d89
JK
4841 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4842 ice_ptp_init(pf);
462acf6a 4843
43113ff7
KK
4844 if (ice_is_feature_supported(pf, ICE_F_GNSS))
4845 ice_gnss_init(pf);
4846
148beb61
HT
4847 /* Note: Flow director init failure is non-fatal to load */
4848 if (ice_init_fdir(pf))
4849 dev_err(dev, "could not initialize flow director\n");
4850
462acf6a
TN
4851 /* Note: DCB init failure is non-fatal to load */
4852 if (ice_init_pf_dcb(pf, false)) {
4853 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4854 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4855 } else {
4856 ice_cfg_lldp_mib_change(&pf->hw, true);
4857 }
4858
df006dd4
DE
4859 if (ice_init_lag(pf))
4860 dev_warn(dev, "Failed to init link aggregation support\n");
4861
e18ff118
PG
4862 /* print PCI link speed and width */
4863 pcie_print_link_status(pf->pdev);
4864
de75135b 4865probe_done:
1e23f076
AV
4866 err = ice_register_netdev(pf);
4867 if (err)
4868 goto err_netdev_reg;
4869
e523af4e
SS
4870 err = ice_devlink_register_params(pf);
4871 if (err)
4872 goto err_netdev_reg;
4873
de75135b 4874 /* ready to go, so clear down state bit */
7e408e07 4875 clear_bit(ICE_DOWN, pf->state);
88f62aea 4876 if (ice_is_rdma_ena(pf)) {
d25a0fc4
DE
4877 pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
4878 if (pf->aux_idx < 0) {
4879 dev_err(dev, "Failed to allocate device ID for AUX driver\n");
4880 err = -ENOMEM;
e523af4e 4881 goto err_devlink_reg_param;
d25a0fc4
DE
4882 }
4883
4884 err = ice_init_rdma(pf);
4885 if (err) {
4886 dev_err(dev, "Failed to initialize RDMA: %d\n", err);
4887 err = -EIO;
4888 goto err_init_aux_unroll;
4889 }
4890 } else {
4891 dev_warn(dev, "RDMA is not supported on this device\n");
4892 }
4893
838cefd5 4894 ice_devlink_register(pf);
837f08fd 4895 return 0;
f31e4b6f 4896
d25a0fc4
DE
4897err_init_aux_unroll:
4898 pf->adev = NULL;
4899 ida_free(&ice_aux_ida, pf->aux_idx);
e523af4e
SS
4900err_devlink_reg_param:
4901 ice_devlink_unregister_params(pf);
1e23f076 4902err_netdev_reg:
78116e97
MS
4903err_send_version_unroll:
4904 ice_vsi_release_all(pf);
3a858ba3 4905err_alloc_sw_unroll:
7e408e07
AV
4906 set_bit(ICE_SERVICE_DIS, pf->state);
4907 set_bit(ICE_DOWN, pf->state);
4015d11e 4908 devm_kfree(dev, pf->first_sw);
940b61af
AV
4909err_msix_misc_unroll:
4910 ice_free_irq_msix_misc(pf);
4911err_init_interrupt_unroll:
4912 ice_clear_interrupt_scheme(pf);
bc3a0241 4913err_init_vsi_unroll:
77ed84f4 4914 devm_kfree(dev, pf->vsi);
940b61af
AV
4915err_init_pf_unroll:
4916 ice_deinit_pf(pf);
dce730f1 4917 ice_devlink_destroy_regions(pf);
940b61af 4918 ice_deinit_hw(hw);
f31e4b6f
AV
4919err_exit_unroll:
4920 pci_disable_pcie_error_reporting(pdev);
769c500d 4921 pci_disable_device(pdev);
f31e4b6f 4922 return err;
837f08fd
AV
4923}
4924
769c500d
AA
4925/**
4926 * ice_set_wake - enable or disable Wake on LAN
4927 * @pf: pointer to the PF struct
4928 *
4929 * Simple helper for WoL control
4930 */
4931static void ice_set_wake(struct ice_pf *pf)
4932{
4933 struct ice_hw *hw = &pf->hw;
4934 bool wol = pf->wol_ena;
4935
4936 /* clear wake state, otherwise new wake events won't fire */
4937 wr32(hw, PFPM_WUS, U32_MAX);
4938
4939 /* enable / disable APM wake up, no RMW needed */
4940 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
4941
4942 /* set magic packet filter enabled */
4943 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
4944}
4945
4946/**
ef860480 4947 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
769c500d
AA
4948 * @pf: pointer to the PF struct
4949 *
4950 * Issue firmware command to enable multicast magic wake, making
4951 * sure that any locally administered address (LAA) is used for
4952 * wake, and that PF reset doesn't undo the LAA.
4953 */
4954static void ice_setup_mc_magic_wake(struct ice_pf *pf)
4955{
4956 struct device *dev = ice_pf_to_dev(pf);
4957 struct ice_hw *hw = &pf->hw;
769c500d
AA
4958 u8 mac_addr[ETH_ALEN];
4959 struct ice_vsi *vsi;
5518ac2a 4960 int status;
769c500d
AA
4961 u8 flags;
4962
4963 if (!pf->wol_ena)
4964 return;
4965
4966 vsi = ice_get_main_vsi(pf);
4967 if (!vsi)
4968 return;
4969
4970 /* Get current MAC address in case it's an LAA */
4971 if (vsi->netdev)
4972 ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
4973 else
4974 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4975
4976 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
4977 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
4978 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
4979
4980 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
4981 if (status)
5f87ec48 4982 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
5518ac2a 4983 status, ice_aq_str(hw->adminq.sq_last_status));
769c500d
AA
4984}
4985
837f08fd
AV
4986/**
4987 * ice_remove - Device removal routine
4988 * @pdev: PCI device information struct
4989 */
4990static void ice_remove(struct pci_dev *pdev)
4991{
4992 struct ice_pf *pf = pci_get_drvdata(pdev);
81b23589 4993 int i;
837f08fd 4994
838cefd5 4995 ice_devlink_unregister(pf);
afd9d4ab
AV
4996 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
4997 if (!ice_is_reset_in_progress(pf->state))
4998 break;
4999 msleep(100);
5000 }
5001
195bb48f
MS
5002 ice_tc_indir_block_remove(pf);
5003
f844d521 5004 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
7e408e07 5005 set_bit(ICE_VF_RESETS_DISABLED, pf->state);
f844d521
BC
5006 ice_free_vfs(pf);
5007 }
5008
8d81fa55 5009 ice_service_task_stop(pf);
f31e4b6f 5010
d69ea414 5011 ice_aq_cancel_waiting_tasks(pf);
f9f5301e 5012 ice_unplug_aux_dev(pf);
73e30a62
DE
5013 if (pf->aux_idx >= 0)
5014 ida_free(&ice_aux_ida, pf->aux_idx);
e523af4e 5015 ice_devlink_unregister_params(pf);
f9f5301e 5016 set_bit(ICE_DOWN, pf->state);
d69ea414 5017
df006dd4 5018 ice_deinit_lag(pf);
06c16d89
JK
5019 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
5020 ice_ptp_release(pf);
43113ff7
KK
5021 if (ice_is_feature_supported(pf, ICE_F_GNSS))
5022 ice_gnss_exit(pf);
28bf2672
BC
5023 if (!ice_is_safe_mode(pf))
5024 ice_remove_arfs(pf);
769c500d 5025 ice_setup_mc_magic_wake(pf);
0f9d5027 5026 ice_vsi_release_all(pf);
1b4ae7d9 5027 mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
769c500d 5028 ice_set_wake(pf);
940b61af 5029 ice_free_irq_msix_misc(pf);
81b23589
DE
5030 ice_for_each_vsi(pf, i) {
5031 if (!pf->vsi[i])
5032 continue;
5033 ice_vsi_free_q_vectors(pf->vsi[i]);
5034 }
940b61af 5035 ice_deinit_pf(pf);
dce730f1 5036 ice_devlink_destroy_regions(pf);
f31e4b6f 5037 ice_deinit_hw(&pf->hw);
1adf7ead 5038
18057cb3
BA
5039 /* Issue a PFR as part of the prescribed driver unload flow. Do not
5040 * do it via ice_schedule_reset() since there is no need to rebuild
5041 * and the service task is already stopped.
5042 */
5043 ice_reset(&pf->hw, ICE_RESET_PFR);
c6012ac1
BA
5044 pci_wait_for_pending_transaction(pdev);
5045 ice_clear_interrupt_scheme(pf);
837f08fd 5046 pci_disable_pcie_error_reporting(pdev);
769c500d
AA
5047 pci_disable_device(pdev);
5048}
5049
5050/**
5051 * ice_shutdown - PCI callback for shutting down device
5052 * @pdev: PCI device information struct
5053 */
5054static void ice_shutdown(struct pci_dev *pdev)
5055{
5056 struct ice_pf *pf = pci_get_drvdata(pdev);
5057
5058 ice_remove(pdev);
5059
5060 if (system_state == SYSTEM_POWER_OFF) {
5061 pci_wake_from_d3(pdev, pf->wol_ena);
5062 pci_set_power_state(pdev, PCI_D3hot);
5063 }
837f08fd
AV
5064}
5065
769c500d
AA
5066#ifdef CONFIG_PM
5067/**
5068 * ice_prepare_for_shutdown - prep for PCI shutdown
5069 * @pf: board private structure
5070 *
5071 * Inform or close all dependent features in prep for PCI device shutdown
5072 */
5073static void ice_prepare_for_shutdown(struct ice_pf *pf)
5074{
5075 struct ice_hw *hw = &pf->hw;
5076 u32 v;
5077
5078 /* Notify VFs of impending reset */
5079 if (ice_check_sq_alive(hw, &hw->mailboxq))
5080 ice_vc_notify_reset(pf);
5081
5082 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5083
5084 /* disable the VSIs and their queues that are not already DOWN */
5085 ice_pf_dis_all_vsi(pf, false);
5086
5087 ice_for_each_vsi(pf, v)
5088 if (pf->vsi[v])
5089 pf->vsi[v]->vsi_num = 0;
5090
5091 ice_shutdown_all_ctrlq(hw);
5092}
5093
5094/**
5095 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5096 * @pf: board private structure to reinitialize
5097 *
 5098 * This routine reinitializes the interrupt scheme that was cleared during
 5099 * the power management suspend callback.
 5100 *
 5101 * This should be called during the resume routine to re-allocate the q_vectors
 5102 * and reacquire interrupts.
5103 */
5104static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5105{
5106 struct device *dev = ice_pf_to_dev(pf);
5107 int ret, v;
5108
 5109	/* Since we clear the MSIX flag during suspend, we need to
5110 * set it back during resume...
5111 */
5112
5113 ret = ice_init_interrupt_scheme(pf);
5114 if (ret) {
5115 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
5116 return ret;
5117 }
5118
 5119	/* Remap vectors and rings after successfully re-initializing interrupts */
5120 ice_for_each_vsi(pf, v) {
5121 if (!pf->vsi[v])
5122 continue;
5123
5124 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5125 if (ret)
5126 goto err_reinit;
5127 ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5128 }
5129
5130 ret = ice_req_irq_msix_misc(pf);
5131 if (ret) {
5132 dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5133 ret);
5134 goto err_reinit;
5135 }
5136
5137 return 0;
5138
5139err_reinit:
5140 while (v--)
5141 if (pf->vsi[v])
5142 ice_vsi_free_q_vectors(pf->vsi[v]);
5143
5144 return ret;
5145}
5146
5147/**
 5148 * ice_suspend - PM callback to prepare the device for suspend
5149 * @dev: generic device information structure
5150 *
5151 * Power Management callback to quiesce the device and prepare
5152 * for D3 transition.
5153 */
65c72291 5154static int __maybe_unused ice_suspend(struct device *dev)
769c500d
AA
5155{
5156 struct pci_dev *pdev = to_pci_dev(dev);
5157 struct ice_pf *pf;
5158 int disabled, v;
5159
5160 pf = pci_get_drvdata(pdev);
5161
5162 if (!ice_pf_state_is_nominal(pf)) {
5163 dev_err(dev, "Device is not ready, no need to suspend it\n");
5164 return -EBUSY;
5165 }
5166
5167 /* Stop watchdog tasks until resume completion.
5168 * Even though it is most likely that the service task is
5169 * disabled if the device is suspended or down, the service task's
5170 * state is controlled by a different state bit, and we should
5171 * store and honor whatever state that bit is in at this point.
5172 */
5173 disabled = ice_service_task_stop(pf);
5174
f9f5301e
DE
5175 ice_unplug_aux_dev(pf);
5176
769c500d 5177	/* Already suspended? Then there is nothing to do */
7e408e07 5178 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
769c500d
AA
5179 if (!disabled)
5180 ice_service_task_restart(pf);
5181 return 0;
5182 }
5183
7e408e07 5184 if (test_bit(ICE_DOWN, pf->state) ||
769c500d
AA
5185 ice_is_reset_in_progress(pf->state)) {
5186 dev_err(dev, "can't suspend device in reset or already down\n");
5187 if (!disabled)
5188 ice_service_task_restart(pf);
5189 return 0;
5190 }
5191
5192 ice_setup_mc_magic_wake(pf);
5193
5194 ice_prepare_for_shutdown(pf);
5195
5196 ice_set_wake(pf);
5197
5198 /* Free vectors, clear the interrupt scheme and release IRQs
5199 * for proper hibernation, especially with large number of CPUs.
5200 * Otherwise hibernation might fail when mapping all the vectors back
5201 * to CPU0.
5202 */
5203 ice_free_irq_msix_misc(pf);
5204 ice_for_each_vsi(pf, v) {
5205 if (!pf->vsi[v])
5206 continue;
5207 ice_vsi_free_q_vectors(pf->vsi[v]);
5208 }
5209 ice_clear_interrupt_scheme(pf);
5210
466e4392 5211 pci_save_state(pdev);
769c500d
AA
5212 pci_wake_from_d3(pdev, pf->wol_ena);
5213 pci_set_power_state(pdev, PCI_D3hot);
5214 return 0;
5215}
5216
5217/**
5218 * ice_resume - PM callback for waking up from D3
5219 * @dev: generic device information structure
5220 */
65c72291 5221static int __maybe_unused ice_resume(struct device *dev)
769c500d
AA
5222{
5223 struct pci_dev *pdev = to_pci_dev(dev);
5224 enum ice_reset_req reset_type;
5225 struct ice_pf *pf;
5226 struct ice_hw *hw;
5227 int ret;
5228
5229 pci_set_power_state(pdev, PCI_D0);
5230 pci_restore_state(pdev);
5231 pci_save_state(pdev);
5232
5233 if (!pci_device_is_present(pdev))
5234 return -ENODEV;
5235
5236 ret = pci_enable_device_mem(pdev);
5237 if (ret) {
5238 dev_err(dev, "Cannot enable device after suspend\n");
5239 return ret;
5240 }
5241
5242 pf = pci_get_drvdata(pdev);
5243 hw = &pf->hw;
5244
5245 pf->wakeup_reason = rd32(hw, PFPM_WUS);
5246 ice_print_wake_reason(pf);
5247
5248 /* We cleared the interrupt scheme when we suspended, so we need to
5249 * restore it now to resume device functionality.
5250 */
5251 ret = ice_reinit_interrupt_scheme(pf);
5252 if (ret)
5253 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5254
7e408e07 5255 clear_bit(ICE_DOWN, pf->state);
769c500d
AA
5256 /* Now perform PF reset and rebuild */
5257 reset_type = ICE_RESET_PFR;
5258 /* re-enable service task for reset, but allow reset to schedule it */
7e408e07 5259 clear_bit(ICE_SERVICE_DIS, pf->state);
769c500d
AA
5260
5261 if (ice_schedule_reset(pf, reset_type))
5262 dev_err(dev, "Reset during resume failed.\n");
5263
7e408e07 5264 clear_bit(ICE_SUSPENDED, pf->state);
769c500d
AA
5265 ice_service_task_restart(pf);
5266
5267 /* Restart the service task */
5268 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5269
5270 return 0;
5271}
5272#endif /* CONFIG_PM */
5273
5995b6d0
BC
5274/**
5275 * ice_pci_err_detected - warning that PCI error has been detected
5276 * @pdev: PCI device information struct
5277 * @err: the type of PCI error
5278 *
5279 * Called to warn that something happened on the PCI bus and the error handling
5280 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
5281 */
5282static pci_ers_result_t
16d79cd4 5283ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5995b6d0
BC
5284{
5285 struct ice_pf *pf = pci_get_drvdata(pdev);
5286
5287 if (!pf) {
5288 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5289 __func__, err);
5290 return PCI_ERS_RESULT_DISCONNECT;
5291 }
5292
7e408e07 5293 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5995b6d0
BC
5294 ice_service_task_stop(pf);
5295
7e408e07
AV
5296 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5297 set_bit(ICE_PFR_REQ, pf->state);
fbc7b27a 5298 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5995b6d0
BC
5299 }
5300 }
5301
5302 return PCI_ERS_RESULT_NEED_RESET;
5303}
5304
5305/**
5306 * ice_pci_err_slot_reset - a PCI slot reset has just happened
5307 * @pdev: PCI device information struct
5308 *
5309 * Called to determine if the driver can recover from the PCI slot reset by
5310 * using a register read to determine if the device is recoverable.
5311 */
5312static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5313{
5314 struct ice_pf *pf = pci_get_drvdata(pdev);
5315 pci_ers_result_t result;
5316 int err;
5317 u32 reg;
5318
5319 err = pci_enable_device_mem(pdev);
5320 if (err) {
19cce2c6 5321 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5995b6d0
BC
5322 err);
5323 result = PCI_ERS_RESULT_DISCONNECT;
5324 } else {
5325 pci_set_master(pdev);
5326 pci_restore_state(pdev);
5327 pci_save_state(pdev);
5328 pci_wake_from_d3(pdev, false);
5329
5330 /* Check for life */
5331 reg = rd32(&pf->hw, GLGEN_RTRIG);
5332 if (!reg)
5333 result = PCI_ERS_RESULT_RECOVERED;
5334 else
5335 result = PCI_ERS_RESULT_DISCONNECT;
5336 }
5337
5995b6d0
BC
5338 return result;
5339}
5340
5341/**
5342 * ice_pci_err_resume - restart operations after PCI error recovery
5343 * @pdev: PCI device information struct
5344 *
5345 * Called to allow the driver to bring things back up after PCI error and/or
5346 * reset recovery have finished
5347 */
5348static void ice_pci_err_resume(struct pci_dev *pdev)
5349{
5350 struct ice_pf *pf = pci_get_drvdata(pdev);
5351
5352 if (!pf) {
19cce2c6
AV
5353 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5354 __func__);
5995b6d0
BC
5355 return;
5356 }
5357
7e408e07 5358 if (test_bit(ICE_SUSPENDED, pf->state)) {
5995b6d0
BC
5359 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5360 __func__);
5361 return;
5362 }
5363
a54a0b24
NN
5364 ice_restore_all_vfs_msi_state(pdev);
5365
5995b6d0
BC
5366 ice_do_reset(pf, ICE_RESET_PFR);
5367 ice_service_task_restart(pf);
5368 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5369}
5370
5371/**
5372 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5373 * @pdev: PCI device information struct
5374 */
5375static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5376{
5377 struct ice_pf *pf = pci_get_drvdata(pdev);
5378
7e408e07 5379 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5995b6d0
BC
5380 ice_service_task_stop(pf);
5381
7e408e07
AV
5382 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5383 set_bit(ICE_PFR_REQ, pf->state);
fbc7b27a 5384 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5995b6d0
BC
5385 }
5386 }
5387}
5388
5389/**
5390 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5391 * @pdev: PCI device information struct
5392 */
5393static void ice_pci_err_reset_done(struct pci_dev *pdev)
5394{
5395 ice_pci_err_resume(pdev);
5396}
5397
837f08fd
AV
5398/* ice_pci_tbl - PCI Device ID Table
5399 *
5400 * Wildcard entries (PCI_ANY_ID) should come last
5401 * Last entry must be all 0s
5402 *
5403 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5404 * Class, Class Mask, private data (not used) }
5405 */
5406static const struct pci_device_id ice_pci_tbl[] = {
633d7449
AV
5407 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
5408 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
5409 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
7dcf78b8
TN
5410 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
5411 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
195fb977 5412 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
e36aeec0
BA
5413 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
5414 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
5415 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
5416 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
5417 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
5d9e618c
JK
5418 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
5419 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
5420 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
5421 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
5422 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
2fbfa966 5423 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
5d9e618c
JK
5424 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
5425 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
5426 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
e36aeec0
BA
5427 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5428 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5429 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5430 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5431 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
f52d1668 5432 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 },
837f08fd
AV
5433 /* required last entry */
5434 { 0, }
5435};
5436MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5437
769c500d
AA
5438static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5439
5995b6d0
BC
5440static const struct pci_error_handlers ice_pci_err_handler = {
5441 .error_detected = ice_pci_err_detected,
5442 .slot_reset = ice_pci_err_slot_reset,
5443 .reset_prepare = ice_pci_err_reset_prepare,
5444 .reset_done = ice_pci_err_reset_done,
5445 .resume = ice_pci_err_resume
5446};
5447
837f08fd
AV
5448static struct pci_driver ice_driver = {
5449 .name = KBUILD_MODNAME,
5450 .id_table = ice_pci_tbl,
5451 .probe = ice_probe,
5452 .remove = ice_remove,
769c500d
AA
5453#ifdef CONFIG_PM
5454 .driver.pm = &ice_pm_ops,
5455#endif /* CONFIG_PM */
5456 .shutdown = ice_shutdown,
ddf30f7f 5457 .sriov_configure = ice_sriov_configure,
5995b6d0 5458 .err_handler = &ice_pci_err_handler
837f08fd
AV
5459};
5460
5461/**
5462 * ice_module_init - Driver registration routine
5463 *
5464 * ice_module_init is the first routine called when the driver is
5465 * loaded. All it does is register with the PCI subsystem.
5466 */
5467static int __init ice_module_init(void)
5468{
5469 int status;
5470
34a2a3b8 5471 pr_info("%s\n", ice_driver_string);
837f08fd
AV
5472 pr_info("%s\n", ice_copyright);
5473
0f9d5027 5474 ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
940b61af
AV
5475 if (!ice_wq) {
5476 pr_err("Failed to create workqueue\n");
5477 return -ENOMEM;
5478 }
5479
837f08fd 5480 status = pci_register_driver(&ice_driver);
940b61af 5481 if (status) {
2f2da36e 5482 pr_err("failed to register PCI driver, err %d\n", status);
940b61af
AV
5483 destroy_workqueue(ice_wq);
5484 }
837f08fd
AV
5485
5486 return status;
5487}
5488module_init(ice_module_init);
5489
5490/**
5491 * ice_module_exit - Driver exit cleanup routine
5492 *
5493 * ice_module_exit is called just before the driver is removed
5494 * from memory.
5495 */
5496static void __exit ice_module_exit(void)
5497{
5498 pci_unregister_driver(&ice_driver);
940b61af 5499 destroy_workqueue(ice_wq);
837f08fd
AV
5500 pr_info("module unloaded\n");
5501}
5502module_exit(ice_module_exit);
3a858ba3 5503
e94d4478 5504/**
f9867df6 5505 * ice_set_mac_address - NDO callback to set MAC address
e94d4478
AV
5506 * @netdev: network interface device structure
5507 * @pi: pointer to an address structure
5508 *
5509 * Returns 0 on success, negative on failure
5510 */
5511static int ice_set_mac_address(struct net_device *netdev, void *pi)
5512{
5513 struct ice_netdev_priv *np = netdev_priv(netdev);
5514 struct ice_vsi *vsi = np->vsi;
5515 struct ice_pf *pf = vsi->back;
5516 struct ice_hw *hw = &pf->hw;
5517 struct sockaddr *addr = pi;
b357d971 5518 u8 old_mac[ETH_ALEN];
e94d4478 5519 u8 flags = 0;
e94d4478 5520 u8 *mac;
2ccc1c1c 5521 int err;
e94d4478
AV
5522
5523 mac = (u8 *)addr->sa_data;
5524
5525 if (!is_valid_ether_addr(mac))
5526 return -EADDRNOTAVAIL;
5527
5528 if (ether_addr_equal(netdev->dev_addr, mac)) {
3ba7f53f 5529 netdev_dbg(netdev, "already using mac %pM\n", mac);
e94d4478
AV
5530 return 0;
5531 }
5532
7e408e07 5533 if (test_bit(ICE_DOWN, pf->state) ||
5df7e45d 5534 ice_is_reset_in_progress(pf->state)) {
e94d4478
AV
5535 netdev_err(netdev, "can't set mac %pM. device not ready\n",
5536 mac);
5537 return -EBUSY;
5538 }
5539
9fea7498
KP
5540 if (ice_chnl_dmac_fltr_cnt(pf)) {
5541 netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
5542 mac);
5543 return -EAGAIN;
5544 }
5545
3ba7f53f 5546 netif_addr_lock_bh(netdev);
b357d971
BC
5547 ether_addr_copy(old_mac, netdev->dev_addr);
5548 /* change the netdev's MAC address */
a05e4c0a 5549 eth_hw_addr_set(netdev, mac);
b357d971
BC
5550 netif_addr_unlock_bh(netdev);
5551
757976ab 5552 /* Clean up old MAC filter. Not an error if old filter doesn't exist */
2ccc1c1c
TN
5553 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5554 if (err && err != -ENOENT) {
e94d4478 5555 err = -EADDRNOTAVAIL;
bbb968e8 5556 goto err_update_filters;
e94d4478
AV
5557 }
5558
13ed5e8a 5559 /* Add filter for new MAC. If filter exists, return success */
2ccc1c1c 5560 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
2c0069f3 5561 if (err == -EEXIST) {
13ed5e8a
NN
5562 /* Although this MAC filter is already present in hardware it's
5563 * possible in some cases (e.g. bonding) that dev_addr was
5564 * modified outside of the driver and needs to be restored back
5565 * to this value.
5566 */
757976ab 5567 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
2c0069f3
IV
5568
5569 return 0;
5570 } else if (err) {
3ba7f53f 5571 /* error if the new filter addition failed */
757976ab 5572 err = -EADDRNOTAVAIL;
2c0069f3 5573 }
757976ab 5574
bbb968e8 5575err_update_filters:
e94d4478 5576 if (err) {
2f2da36e 5577 netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
e94d4478 5578 mac);
b357d971 5579 netif_addr_lock_bh(netdev);
f3956ebb 5580 eth_hw_addr_set(netdev, old_mac);
3ba7f53f 5581 netif_addr_unlock_bh(netdev);
e94d4478
AV
5582 return err;
5583 }
5584
2f2da36e 5585 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
e94d4478
AV
5586 netdev->dev_addr);
5587
f9867df6 5588 /* write new MAC address to the firmware */
e94d4478 5589 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
2ccc1c1c
TN
5590 err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5591 if (err) {
5f87ec48 5592 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
2ccc1c1c 5593 mac, err);
e94d4478
AV
5594 }
5595 return 0;
5596}
5597
5598/**
5599 * ice_set_rx_mode - NDO callback to set the netdev filters
5600 * @netdev: network interface device structure
5601 */
5602static void ice_set_rx_mode(struct net_device *netdev)
5603{
5604 struct ice_netdev_priv *np = netdev_priv(netdev);
5605 struct ice_vsi *vsi = np->vsi;
5606
5607 if (!vsi)
5608 return;
5609
5610 /* Set the flags to synchronize filters
5611 * ndo_set_rx_mode may be triggered even without a change in netdev
5612 * flags
5613 */
e97fb1ae
AV
5614 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5615 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
e94d4478
AV
5616 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5617
5618 /* schedule our worker thread which will take care of
5619 * applying the new filter changes
5620 */
5621 ice_service_task_schedule(vsi->back);
5622}
5623
1ddef455
UK
5624/**
5625 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5626 * @netdev: network interface device structure
5627 * @queue_index: Queue ID
5628 * @maxrate: maximum bandwidth in Mbps
5629 */
5630static int
5631ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5632{
5633 struct ice_netdev_priv *np = netdev_priv(netdev);
5634 struct ice_vsi *vsi = np->vsi;
1ddef455 5635 u16 q_handle;
5518ac2a 5636 int status;
1ddef455
UK
5637 u8 tc;
5638
5639 /* Validate maxrate requested is within permitted range */
5640 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
19cce2c6 5641 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
1ddef455
UK
5642 maxrate, queue_index);
5643 return -EINVAL;
5644 }
5645
5646 q_handle = vsi->tx_rings[queue_index]->q_handle;
5647 tc = ice_dcb_get_tc(vsi, queue_index);
5648
5649 /* Set BW back to default, when user set maxrate to 0 */
5650 if (!maxrate)
5651 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5652 q_handle, ICE_MAX_BW);
5653 else
5654 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5655 q_handle, ICE_MAX_BW, maxrate * 1000);
c1484691 5656 if (status)
5f87ec48
TN
5657 netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
5658 status);
1ddef455 5659
c1484691 5660 return status;
1ddef455
UK
5661}
5662
e94d4478
AV
5663/**
5664 * ice_fdb_add - add an entry to the hardware database
5665 * @ndm: the input from the stack
5666 * @tb: pointer to array of nladdr (unused)
5667 * @dev: the net device pointer
5668 * @addr: the MAC address entry being added
f9867df6 5669 * @vid: VLAN ID
e94d4478 5670 * @flags: instructions from stack about fdb operation
99be37ed 5671 * @extack: netlink extended ack
e94d4478 5672 */
99be37ed
BA
5673static int
5674ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5675 struct net_device *dev, const unsigned char *addr, u16 vid,
5676 u16 flags, struct netlink_ext_ack __always_unused *extack)
e94d4478
AV
5677{
5678 int err;
5679
5680 if (vid) {
5681 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5682 return -EINVAL;
5683 }
5684 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5685 netdev_err(dev, "FDB only supports static addresses\n");
5686 return -EINVAL;
5687 }
5688
5689 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5690 err = dev_uc_add_excl(dev, addr);
5691 else if (is_multicast_ether_addr(addr))
5692 err = dev_mc_add_excl(dev, addr);
5693 else
5694 err = -EINVAL;
5695
5696 /* Only return duplicate errors if NLM_F_EXCL is set */
5697 if (err == -EEXIST && !(flags & NLM_F_EXCL))
5698 err = 0;
5699
5700 return err;
5701}
5702
5703/**
5704 * ice_fdb_del - delete an entry from the hardware database
5705 * @ndm: the input from the stack
5706 * @tb: pointer to array of nladdr (unused)
5707 * @dev: the net device pointer
 5708 * @addr: the MAC address entry being removed
f9867df6 5709 * @vid: VLAN ID
ca4567f1 5710 * @extack: netlink extended ack
e94d4478 5711 */
c8b7abdd
BA
5712static int
5713ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5714 struct net_device *dev, const unsigned char *addr,
ca4567f1 5715 __always_unused u16 vid, struct netlink_ext_ack *extack)
e94d4478
AV
5716{
5717 int err;
5718
5719 if (ndm->ndm_state & NUD_PERMANENT) {
5720 netdev_err(dev, "FDB only supports static addresses\n");
5721 return -EINVAL;
5722 }
5723
5724 if (is_unicast_ether_addr(addr))
5725 err = dev_uc_del(dev, addr);
5726 else if (is_multicast_ether_addr(addr))
5727 err = dev_mc_del(dev, addr);
5728 else
5729 err = -EINVAL;
5730
5731 return err;
5732}
5733
1babaf77
BC
5734#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
5735 NETIF_F_HW_VLAN_CTAG_TX | \
5736 NETIF_F_HW_VLAN_STAG_RX | \
5737 NETIF_F_HW_VLAN_STAG_TX)
5738
5739#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
5740 NETIF_F_HW_VLAN_STAG_FILTER)
5741
5742/**
5743 * ice_fix_features - fix the netdev features flags based on device limitations
5744 * @netdev: ptr to the netdev that flags are being fixed on
5745 * @features: features that need to be checked and possibly fixed
5746 *
5747 * Make sure any fixups are made to features in this callback. This enables the
5748 * driver to not have to check unsupported configurations throughout the driver
 5749 * because that's the responsibility of this callback.
5750 *
5751 * Single VLAN Mode (SVM) Supported Features:
5752 * NETIF_F_HW_VLAN_CTAG_FILTER
5753 * NETIF_F_HW_VLAN_CTAG_RX
5754 * NETIF_F_HW_VLAN_CTAG_TX
5755 *
5756 * Double VLAN Mode (DVM) Supported Features:
5757 * NETIF_F_HW_VLAN_CTAG_FILTER
5758 * NETIF_F_HW_VLAN_CTAG_RX
5759 * NETIF_F_HW_VLAN_CTAG_TX
5760 *
5761 * NETIF_F_HW_VLAN_STAG_FILTER
 5762 * NETIF_F_HW_VLAN_STAG_RX
 5763 * NETIF_F_HW_VLAN_STAG_TX
5764 *
5765 * Features that need fixing:
5766 * Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
 5767 * These are mutually exclusive as the VSI context cannot support multiple
5768 * VLAN ethertypes simultaneously for stripping and/or insertion. If this
5769 * is not done, then default to clearing the requested STAG offload
5770 * settings.
5771 *
5772 * All supported filtering has to be enabled or disabled together. For
5773 * example, in DVM, CTAG and STAG filtering have to be enabled and disabled
5774 * together. If this is not done, then default to VLAN filtering disabled.
5775 * These are mutually exclusive as there is currently no way to
5776 * enable/disable VLAN filtering based on VLAN ethertype when using VLAN
5777 * prune rules.
5778 */
5779static netdev_features_t
5780ice_fix_features(struct net_device *netdev, netdev_features_t features)
5781{
5782 struct ice_netdev_priv *np = netdev_priv(netdev);
9542ef4f
RS
5783 netdev_features_t req_vlan_fltr, cur_vlan_fltr;
5784 bool cur_ctag, cur_stag, req_ctag, req_stag;
5785
5786 cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
5787 cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5788 cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
5789
5790 req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
5791 req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5792 req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
5793
5794 if (req_vlan_fltr != cur_vlan_fltr) {
5795 if (ice_is_dvm_ena(&np->vsi->back->hw)) {
5796 if (req_ctag && req_stag) {
5797 features |= NETIF_VLAN_FILTERING_FEATURES;
5798 } else if (!req_ctag && !req_stag) {
5799 features &= ~NETIF_VLAN_FILTERING_FEATURES;
5800 } else if ((!cur_ctag && req_ctag && !cur_stag) ||
5801 (!cur_stag && req_stag && !cur_ctag)) {
5802 features |= NETIF_VLAN_FILTERING_FEATURES;
5803 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
5804 } else if ((cur_ctag && !req_ctag && cur_stag) ||
5805 (cur_stag && !req_stag && cur_ctag)) {
5806 features &= ~NETIF_VLAN_FILTERING_FEATURES;
5807 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
5808 }
1babaf77 5809 } else {
9542ef4f
RS
5810 if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
5811 netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
5812
5813 if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
5814 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1babaf77
BC
5815 }
5816 }
5817
5818 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
5819 (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
5820 netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
5821 features &= ~(NETIF_F_HW_VLAN_STAG_RX |
5822 NETIF_F_HW_VLAN_STAG_TX);
5823 }
5824
5825 return features;
5826}
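
/* Editorial example (not upstream text): how the filtering fixups above play
 * out when DVM is enabled and both filtering features are currently off:
 *
 *   request CTAG filter on,  STAG filter off -> both are enabled (with warning)
 *   request CTAG filter off, STAG filter on  -> both are enabled (with warning)
 *   request CTAG filter on,  STAG filter on  -> both are enabled
 *   request CTAG filter off, STAG filter off -> both stay disabled
 *
 * In SVM, a requested 802.1ad (STAG) filtering change is rejected with a
 * warning and only NETIF_F_HW_VLAN_CTAG_FILTER is honored.
 */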
5827
5828/**
5829 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
5830 * @vsi: PF's VSI
5831 * @features: features used to determine VLAN offload settings
5832 *
5833 * First, determine the vlan_ethertype based on the VLAN offload bits in
5834 * features. Then determine if stripping and insertion should be enabled or
5835 * disabled. Finally enable or disable VLAN stripping and insertion.
5836 */
5837static int
5838ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
5839{
5840 bool enable_stripping = true, enable_insertion = true;
5841 struct ice_vsi_vlan_ops *vlan_ops;
5842 int strip_err = 0, insert_err = 0;
5843 u16 vlan_ethertype = 0;
5844
5845 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
5846
5847 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
5848 vlan_ethertype = ETH_P_8021AD;
5849 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
5850 vlan_ethertype = ETH_P_8021Q;
5851
5852 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
5853 enable_stripping = false;
5854 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
5855 enable_insertion = false;
5856
5857 if (enable_stripping)
5858 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
5859 else
5860 strip_err = vlan_ops->dis_stripping(vsi);
5861
5862 if (enable_insertion)
5863 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
5864 else
5865 insert_err = vlan_ops->dis_insertion(vsi);
5866
5867 if (strip_err || insert_err)
5868 return -EIO;
5869
5870 return 0;
5871}
5872
5873/**
5874 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
5875 * @vsi: PF's VSI
5876 * @features: features used to determine VLAN filtering settings
5877 *
5878 * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
5879 * features.
5880 */
5881static int
5882ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
5883{
5884 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
5885 int err = 0;
5886
5887 /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
5888 * if either bit is set
5889 */
5890 if (features &
5891 (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
5892 err = vlan_ops->ena_rx_filtering(vsi);
5893 else
5894 err = vlan_ops->dis_rx_filtering(vsi);
5895
5896 return err;
5897}
5898
5899/**
5900 * ice_set_vlan_features - set VLAN settings based on suggested feature set
5901 * @netdev: ptr to the netdev being adjusted
5902 * @features: the feature set that the stack is suggesting
5903 *
5904 * Only update VLAN settings if the requested_vlan_features are different than
5905 * the current_vlan_features.
5906 */
5907static int
5908ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
5909{
5910 netdev_features_t current_vlan_features, requested_vlan_features;
5911 struct ice_netdev_priv *np = netdev_priv(netdev);
5912 struct ice_vsi *vsi = np->vsi;
5913 int err;
5914
5915 current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
5916 requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
5917 if (current_vlan_features ^ requested_vlan_features) {
5918 err = ice_set_vlan_offload_features(vsi, features);
5919 if (err)
5920 return err;
5921 }
5922
5923 current_vlan_features = netdev->features &
5924 NETIF_VLAN_FILTERING_FEATURES;
5925 requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
5926 if (current_vlan_features ^ requested_vlan_features) {
5927 err = ice_set_vlan_filtering_features(vsi, features);
5928 if (err)
5929 return err;
5930 }
5931
5932 return 0;
5933}
5934
44ece4e1
MF
5935/**
5936 * ice_set_loopback - turn on/off loopback mode on underlying PF
5937 * @vsi: ptr to VSI
5938 * @ena: flag to indicate the on/off setting
5939 */
5940static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
5941{
5942 bool if_running = netif_running(vsi->netdev);
5943 int ret;
5944
5945 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
5946 ret = ice_down(vsi);
5947 if (ret) {
5948 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
5949 return ret;
5950 }
5951 }
5952 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
5953 if (ret)
5954 netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
5955 if (if_running)
5956 ret = ice_up(vsi);
5957
5958 return ret;
5959}
5960
d76a60ba
AV
5961/**
5962 * ice_set_features - set the netdev feature flags
5963 * @netdev: ptr to the netdev being adjusted
5964 * @features: the feature set that the stack is suggesting
5965 */
c8b7abdd
BA
5966static int
5967ice_set_features(struct net_device *netdev, netdev_features_t features)
d76a60ba 5968{
c67672fa 5969 netdev_features_t changed = netdev->features ^ features;
d76a60ba
AV
5970 struct ice_netdev_priv *np = netdev_priv(netdev);
5971 struct ice_vsi *vsi = np->vsi;
5f8cc355 5972 struct ice_pf *pf = vsi->back;
d76a60ba
AV
5973 int ret = 0;
5974
462acf6a 5975 /* Don't set any netdev advanced features with device in Safe Mode */
c67672fa
MF
5976 if (ice_is_safe_mode(pf)) {
5977 dev_err(ice_pf_to_dev(pf),
5978 "Device is in Safe Mode - not enabling advanced netdev features\n");
462acf6a
TN
5979 return ret;
5980 }
5981
5f8cc355
HT
5982 /* Do not change setting during reset */
5983 if (ice_is_reset_in_progress(pf->state)) {
c67672fa
MF
5984 dev_err(ice_pf_to_dev(pf),
5985 "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
5f8cc355
HT
5986 return -EBUSY;
5987 }
5988
8f529ff9
TN
5989 /* Multiple features can be changed in one call so keep features in
5990 * separate if/else statements to guarantee each feature is checked
5991 */
c67672fa
MF
5992 if (changed & NETIF_F_RXHASH)
5993 ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
492af0ab 5994
1babaf77
BC
5995 ret = ice_set_vlan_features(netdev, features);
5996 if (ret)
5997 return ret;
3171948e 5998
c67672fa
MF
5999 if (changed & NETIF_F_NTUPLE) {
6000 bool ena = !!(features & NETIF_F_NTUPLE);
6001
6002 ice_vsi_manage_fdir(vsi, ena);
6003 ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
28bf2672 6004 }
148beb61 6005
fbc7b27a
KP
6006 /* don't turn off hw_tc_offload when ADQ is already enabled */
6007 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6008 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6009 return -EACCES;
6010 }
9fea7498 6011
c67672fa
MF
6012 if (changed & NETIF_F_HW_TC) {
6013 bool ena = !!(features & NETIF_F_HW_TC);
9fea7498 6014
c67672fa
MF
6015 ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
6016 clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
6017 }
9fea7498 6018
44ece4e1
MF
6019 if (changed & NETIF_F_LOOPBACK)
6020 ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6021
6022 return ret;
d76a60ba
AV
6023}
6024
6025/**
c31af68a 6026 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
f9867df6 6027 * @vsi: VSI to setup VLAN properties for
d76a60ba
AV
6028 */
6029static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6030{
1babaf77 6031 int err;
d76a60ba 6032
1babaf77
BC
6033 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6034 if (err)
6035 return err;
d76a60ba 6036
1babaf77
BC
6037 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6038 if (err)
6039 return err;
d76a60ba 6040
c31af68a 6041 return ice_vsi_add_vlan_zero(vsi);
d76a60ba
AV
6042}
6043
cdedef59
AV
6044/**
6045 * ice_vsi_cfg - Setup the VSI
6046 * @vsi: the VSI being configured
6047 *
6048 * Return 0 on success and negative value on error
6049 */
0e674aeb 6050int ice_vsi_cfg(struct ice_vsi *vsi)
cdedef59
AV
6051{
6052 int err;
6053
c7f2c42b
AV
6054 if (vsi->netdev) {
6055 ice_set_rx_mode(vsi->netdev);
9ecd25c2 6056
cc019545
MF
6057 if (vsi->type != ICE_VSI_LB) {
6058 err = ice_vsi_vlan_setup(vsi);
9ecd25c2 6059
cc019545
MF
6060 if (err)
6061 return err;
6062 }
c7f2c42b 6063 }
a629cf0a 6064 ice_vsi_cfg_dcb_rings(vsi);
03f7a986
AV
6065
6066 err = ice_vsi_cfg_lan_txqs(vsi);
efc2214b
MF
6067 if (!err && ice_is_xdp_ena_vsi(vsi))
6068 err = ice_vsi_cfg_xdp_txqs(vsi);
cdedef59
AV
6069 if (!err)
6070 err = ice_vsi_cfg_rxqs(vsi);
6071
6072 return err;
6073}
6074
cdf1f1f1 6075/* THEORY OF MODERATION:
d8eb7ad5 6076 * The ice driver hardware works differently than the hardware that DIMLIB was
cdf1f1f1
JK
6077 * originally made for. ice hardware doesn't have packet count limits that
6078 * can trigger an interrupt, but it *does* have interrupt rate limit support,
d8eb7ad5
JB
6079 * which is hard-coded to a limit of 250,000 ints/second.
6080 * If not using dynamic moderation, the INTRL value can be modified
6081 * by ethtool rx-usecs-high.
cdf1f1f1
JK
6082 */
6083struct ice_dim {
6084 /* the throttle rate for interrupts, basically worst case delay before
6085 * an initial interrupt fires, value is stored in microseconds.
6086 */
6087 u16 itr;
cdf1f1f1
JK
6088};
6089
6090/* Make a different profile for Rx that doesn't allow quite so aggressive
d8eb7ad5
JB
6091 * moderation at the high end (it maxes out at 126us or about 8k interrupts a
 6092 * second).
cdf1f1f1
JK
6093 */
6094static const struct ice_dim rx_profile[] = {
d8eb7ad5
JB
6095 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6096 {8}, /* 125,000 ints/s */
6097 {16}, /* 62,500 ints/s */
6098 {62}, /* 16,129 ints/s */
6099 {126} /* 7,936 ints/s */
cdf1f1f1
JK
6100};
6101
 6102/* The transmit profile has the same sorts of values
 6103 * as the previous struct.
6104 */
6105static const struct ice_dim tx_profile[] = {
d8eb7ad5
JB
6106 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6107 {8}, /* 125,000 ints/s */
6108 {40}, /* 16,125 ints/s */
6109 {128}, /* 7,812 ints/s */
6110 {256} /* 3,906 ints/s */
cdf1f1f1
JK
6111};
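
/* Editorial sketch (not part of the upstream driver): the profile entries
 * above are ITR delays in microseconds, so an entry's approximate interrupt
 * rate is 1,000,000 / itr -- e.g. 8 us -> 125,000 ints/s, 62 us -> ~16,129
 * ints/s and 126 us -> ~7,936 ints/s.  The 2 us entries would nominally
 * allow 500,000 ints/s but are capped at 250,000 by the hard-coded INTRL
 * limit described above.  The helper below is hypothetical and exists only
 * to illustrate that arithmetic.
 */
static inline u32 ice_dim_itr_to_ints_per_sec(u16 itr_us)
{
	return itr_us ? 1000000 / itr_us : 0;
}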
6112
6113static void ice_tx_dim_work(struct work_struct *work)
6114{
6115 struct ice_ring_container *rc;
cdf1f1f1 6116 struct dim *dim;
d8eb7ad5 6117 u16 itr;
cdf1f1f1
JK
6118
6119 dim = container_of(work, struct dim, work);
d8eb7ad5 6120 rc = (struct ice_ring_container *)dim->priv;
cdf1f1f1 6121
d8eb7ad5 6122 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
cdf1f1f1
JK
6123
6124 /* look up the values in our local table */
6125 itr = tx_profile[dim->profile_ix].itr;
cdf1f1f1 6126
d8eb7ad5 6127 ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
cdf1f1f1 6128 ice_write_itr(rc, itr);
cdf1f1f1
JK
6129
6130 dim->state = DIM_START_MEASURE;
6131}
6132
6133static void ice_rx_dim_work(struct work_struct *work)
6134{
6135 struct ice_ring_container *rc;
cdf1f1f1 6136 struct dim *dim;
d8eb7ad5 6137 u16 itr;
cdf1f1f1
JK
6138
6139 dim = container_of(work, struct dim, work);
d8eb7ad5 6140 rc = (struct ice_ring_container *)dim->priv;
cdf1f1f1 6141
d8eb7ad5 6142 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
cdf1f1f1
JK
6143
6144 /* look up the values in our local table */
6145 itr = rx_profile[dim->profile_ix].itr;
cdf1f1f1 6146
d8eb7ad5 6147 ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
cdf1f1f1 6148 ice_write_itr(rc, itr);
cdf1f1f1
JK
6149
6150 dim->state = DIM_START_MEASURE;
6151}
6152
d8eb7ad5
JB
6153#define ICE_DIM_DEFAULT_PROFILE_IX 1
6154
6155/**
6156 * ice_init_moderation - set up interrupt moderation
6157 * @q_vector: the vector containing rings to be configured
6158 *
6159 * Set up interrupt moderation registers, with the intent to do the right thing
6160 * when called from reset or from probe, and whether or not dynamic moderation
6161 * is enabled or not. Take special care to write all the registers in both
6162 * dynamic moderation mode or not in order to make sure hardware is in a known
6163 * state.
6164 */
6165static void ice_init_moderation(struct ice_q_vector *q_vector)
6166{
6167 struct ice_ring_container *rc;
6168 bool tx_dynamic, rx_dynamic;
6169
6170 rc = &q_vector->tx;
6171 INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6172 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6173 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6174 rc->dim.priv = rc;
6175 tx_dynamic = ITR_IS_DYNAMIC(rc);
6176
6177 /* set the initial TX ITR to match the above */
6178 ice_write_itr(rc, tx_dynamic ?
6179 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6180
6181 rc = &q_vector->rx;
6182 INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6183 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6184 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6185 rc->dim.priv = rc;
6186 rx_dynamic = ITR_IS_DYNAMIC(rc);
6187
6188 /* set the initial RX ITR to match the above */
6189 ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6190 rc->itr_setting);
6191
6192 ice_set_q_vector_intrl(q_vector);
6193}
6194
2b245cb2
AV
6195/**
6196 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6197 * @vsi: the VSI being configured
6198 */
6199static void ice_napi_enable_all(struct ice_vsi *vsi)
6200{
6201 int q_idx;
6202
6203 if (!vsi->netdev)
6204 return;
6205
b4603dbf 6206 ice_for_each_q_vector(vsi, q_idx) {
eec90376
YX
6207 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6208
d8eb7ad5 6209 ice_init_moderation(q_vector);
cdf1f1f1 6210
e72bba21 6211 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
eec90376
YX
6212 napi_enable(&q_vector->napi);
6213 }
2b245cb2
AV
6214}
6215
cdedef59
AV
6216/**
6217 * ice_up_complete - Finish the last steps of bringing up a connection
6218 * @vsi: The VSI being configured
6219 *
6220 * Return 0 on success and negative value on error
6221 */
6222static int ice_up_complete(struct ice_vsi *vsi)
6223{
6224 struct ice_pf *pf = vsi->back;
6225 int err;
6226
ba880734 6227 ice_vsi_cfg_msix(vsi);
cdedef59
AV
6228
6229 /* Enable only Rx rings, Tx rings were enabled by the FW when the
6230 * Tx queue group list was configured and the context bits were
6231 * programmed using ice_vsi_cfg_txqs
6232 */
13a6233b 6233 err = ice_vsi_start_all_rx_rings(vsi);
cdedef59
AV
6234 if (err)
6235 return err;
6236
e97fb1ae 6237 clear_bit(ICE_VSI_DOWN, vsi->state);
2b245cb2 6238 ice_napi_enable_all(vsi);
cdedef59
AV
6239 ice_vsi_ena_irq(vsi);
6240
6241 if (vsi->port_info &&
6242 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6243 vsi->netdev) {
6244 ice_print_link_msg(vsi, true);
6245 netif_tx_start_all_queues(vsi->netdev);
6246 netif_carrier_on(vsi->netdev);
3a749623
JK
6247 if (!ice_is_e810(&pf->hw))
6248 ice_ptp_link_change(pf, pf->hw.pf_id, true);
cdedef59
AV
6249 }
6250
31b6298f
PG
6251 /* Perform an initial read of the statistics registers now to
6252 * set the baseline so counters are ready when interface is up
6253 */
6254 ice_update_eth_stats(vsi);
cdedef59
AV
6255 ice_service_task_schedule(pf);
6256
1b5c19c7 6257 return 0;
cdedef59
AV
6258}
6259
fcea6f3d
AV
6260/**
6261 * ice_up - Bring the connection back up after being down
6262 * @vsi: VSI being configured
6263 */
6264int ice_up(struct ice_vsi *vsi)
6265{
6266 int err;
6267
6268 err = ice_vsi_cfg(vsi);
6269 if (!err)
6270 err = ice_up_complete(vsi);
6271
6272 return err;
6273}
6274
6275/**
6276 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
e72bba21
MF
6277 * @syncp: pointer to u64_stats_sync
6278 * @stats: stats that pkts and bytes count will be taken from
fcea6f3d
AV
6279 * @pkts: packets stats counter
6280 * @bytes: bytes stats counter
6281 *
6282 * This function fetches stats from the ring considering the atomic operations
6283 * that needs to be performed to read u64 values in 32 bit machine.
6284 */
c8ff29b5
MS
6285void
6286ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6287 struct ice_q_stats stats, u64 *pkts, u64 *bytes)
fcea6f3d
AV
6288{
6289 unsigned int start;
fcea6f3d 6290
fcea6f3d 6291 do {
e72bba21
MF
6292 start = u64_stats_fetch_begin_irq(syncp);
6293 *pkts = stats.pkts;
6294 *bytes = stats.bytes;
6295 } while (u64_stats_fetch_retry_irq(syncp, start));
fcea6f3d
AV
6296}
6297
49d358e0
MP
6298/**
6299 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6300 * @vsi: the VSI to be updated
1a0f25a5 6301 * @vsi_stats: the stats struct to be updated
49d358e0
MP
6302 * @rings: rings to work on
6303 * @count: number of rings
6304 */
6305static void
1a0f25a5
JB
6306ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6307 struct rtnl_link_stats64 *vsi_stats,
6308 struct ice_tx_ring **rings, u16 count)
49d358e0 6309{
49d358e0
MP
6310 u16 i;
6311
6312 for (i = 0; i < count; i++) {
e72bba21
MF
6313 struct ice_tx_ring *ring;
6314 u64 pkts = 0, bytes = 0;
49d358e0
MP
6315
6316 ring = READ_ONCE(rings[i]);
f1535469
MF
6317 if (!ring)
6318 continue;
6319 ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
49d358e0
MP
6320 vsi_stats->tx_packets += pkts;
6321 vsi_stats->tx_bytes += bytes;
6322 vsi->tx_restart += ring->tx_stats.restart_q;
6323 vsi->tx_busy += ring->tx_stats.tx_busy;
6324 vsi->tx_linearize += ring->tx_stats.tx_linearize;
6325 }
6326}
6327
fcea6f3d
AV
6328/**
6329 * ice_update_vsi_ring_stats - Update VSI stats counters
6330 * @vsi: the VSI to be updated
6331 */
6332static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6333{
1a0f25a5 6334 struct rtnl_link_stats64 *vsi_stats;
fcea6f3d
AV
6335 u64 pkts, bytes;
6336 int i;
6337
1a0f25a5
JB
6338 vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6339 if (!vsi_stats)
6340 return;
fcea6f3d
AV
6341
6342 /* reset non-netdev (extended) stats */
6343 vsi->tx_restart = 0;
6344 vsi->tx_busy = 0;
6345 vsi->tx_linearize = 0;
6346 vsi->rx_buf_failed = 0;
6347 vsi->rx_page_failed = 0;
6348
6349 rcu_read_lock();
6350
6351 /* update Tx rings counters */
1a0f25a5
JB
6352 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6353 vsi->num_txq);
fcea6f3d
AV
6354
6355 /* update Rx rings counters */
6356 ice_for_each_rxq(vsi, i) {
e72bba21 6357 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
b6b0501d 6358
e72bba21 6359 ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
fcea6f3d
AV
6360 vsi_stats->rx_packets += pkts;
6361 vsi_stats->rx_bytes += bytes;
6362 vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
6363 vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
6364 }
6365
49d358e0
MP
6366 /* update XDP Tx rings counters */
6367 if (ice_is_xdp_ena_vsi(vsi))
1a0f25a5 6368 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
49d358e0
MP
6369 vsi->num_xdp_txq);
6370
fcea6f3d 6371 rcu_read_unlock();
1a0f25a5
JB
6372
6373 vsi->net_stats.tx_packets = vsi_stats->tx_packets;
6374 vsi->net_stats.tx_bytes = vsi_stats->tx_bytes;
6375 vsi->net_stats.rx_packets = vsi_stats->rx_packets;
6376 vsi->net_stats.rx_bytes = vsi_stats->rx_bytes;
6377
6378 kfree(vsi_stats);
fcea6f3d
AV
6379}
6380
6381/**
6382 * ice_update_vsi_stats - Update VSI stats counters
6383 * @vsi: the VSI to be updated
6384 */
5a4a8673 6385void ice_update_vsi_stats(struct ice_vsi *vsi)
fcea6f3d
AV
6386{
6387 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6388 struct ice_eth_stats *cur_es = &vsi->eth_stats;
6389 struct ice_pf *pf = vsi->back;
6390
e97fb1ae 6391 if (test_bit(ICE_VSI_DOWN, vsi->state) ||
7e408e07 6392 test_bit(ICE_CFG_BUSY, pf->state))
fcea6f3d
AV
6393 return;
6394
6395 /* get stats as recorded by Tx/Rx rings */
6396 ice_update_vsi_ring_stats(vsi);
6397
6398 /* get VSI stats as recorded by the hardware */
6399 ice_update_eth_stats(vsi);
6400
6401 cur_ns->tx_errors = cur_es->tx_errors;
51fe27e1 6402 cur_ns->rx_dropped = cur_es->rx_discards;
fcea6f3d
AV
6403 cur_ns->tx_dropped = cur_es->tx_discards;
6404 cur_ns->multicast = cur_es->rx_multicast;
6405
6406 /* update some more netdev stats if this is main VSI */
6407 if (vsi->type == ICE_VSI_PF) {
6408 cur_ns->rx_crc_errors = pf->stats.crc_errors;
6409 cur_ns->rx_errors = pf->stats.crc_errors +
4f1fe43c
BC
6410 pf->stats.illegal_bytes +
6411 pf->stats.rx_len_errors +
6412 pf->stats.rx_undersize +
6413 pf->hw_csum_rx_error +
6414 pf->stats.rx_jabber +
6415 pf->stats.rx_fragments +
6416 pf->stats.rx_oversize;
fcea6f3d 6417 cur_ns->rx_length_errors = pf->stats.rx_len_errors;
56923ab6
BC
6418 /* record drops from the port level */
6419 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
fcea6f3d
AV
6420 }
6421}
6422
6423/**
6424 * ice_update_pf_stats - Update PF port stats counters
6425 * @pf: PF whose stats needs to be updated
6426 */
5a4a8673 6427void ice_update_pf_stats(struct ice_pf *pf)
fcea6f3d
AV
6428{
6429 struct ice_hw_port_stats *prev_ps, *cur_ps;
6430 struct ice_hw *hw = &pf->hw;
4ab95646 6431 u16 fd_ctr_base;
9e7a5d17 6432 u8 port;
fcea6f3d 6433
9e7a5d17 6434 port = hw->port_info->lport;
fcea6f3d
AV
6435 prev_ps = &pf->stats_prev;
6436 cur_ps = &pf->stats;
fcea6f3d 6437
9e7a5d17 6438 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
36517fd3 6439 &prev_ps->eth.rx_bytes,
fcea6f3d
AV
6440 &cur_ps->eth.rx_bytes);
6441
9e7a5d17 6442 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
36517fd3 6443 &prev_ps->eth.rx_unicast,
fcea6f3d
AV
6444 &cur_ps->eth.rx_unicast);
6445
9e7a5d17 6446 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
36517fd3 6447 &prev_ps->eth.rx_multicast,
fcea6f3d
AV
6448 &cur_ps->eth.rx_multicast);
6449
9e7a5d17 6450 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
36517fd3 6451 &prev_ps->eth.rx_broadcast,
fcea6f3d
AV
6452 &cur_ps->eth.rx_broadcast);
6453
56923ab6
BC
6454 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
6455 &prev_ps->eth.rx_discards,
6456 &cur_ps->eth.rx_discards);
6457
9e7a5d17 6458 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
36517fd3 6459 &prev_ps->eth.tx_bytes,
fcea6f3d
AV
6460 &cur_ps->eth.tx_bytes);
6461
9e7a5d17 6462 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
36517fd3 6463 &prev_ps->eth.tx_unicast,
fcea6f3d
AV
6464 &cur_ps->eth.tx_unicast);
6465
9e7a5d17 6466 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
36517fd3 6467 &prev_ps->eth.tx_multicast,
fcea6f3d
AV
6468 &cur_ps->eth.tx_multicast);
6469
9e7a5d17 6470 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
36517fd3 6471 &prev_ps->eth.tx_broadcast,
fcea6f3d
AV
6472 &cur_ps->eth.tx_broadcast);
6473
9e7a5d17 6474 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
fcea6f3d
AV
6475 &prev_ps->tx_dropped_link_down,
6476 &cur_ps->tx_dropped_link_down);
6477
9e7a5d17 6478 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
36517fd3 6479 &prev_ps->rx_size_64, &cur_ps->rx_size_64);
fcea6f3d 6480
9e7a5d17 6481 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
36517fd3 6482 &prev_ps->rx_size_127, &cur_ps->rx_size_127);
fcea6f3d 6483
9e7a5d17 6484 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
36517fd3 6485 &prev_ps->rx_size_255, &cur_ps->rx_size_255);
fcea6f3d 6486
9e7a5d17 6487 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
36517fd3 6488 &prev_ps->rx_size_511, &cur_ps->rx_size_511);
fcea6f3d 6489
9e7a5d17 6490 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
fcea6f3d
AV
6491 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
6492
9e7a5d17 6493 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
fcea6f3d
AV
6494 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
6495
9e7a5d17 6496 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
fcea6f3d
AV
6497 &prev_ps->rx_size_big, &cur_ps->rx_size_big);
6498
9e7a5d17 6499 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
36517fd3 6500 &prev_ps->tx_size_64, &cur_ps->tx_size_64);
fcea6f3d 6501
9e7a5d17 6502 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
36517fd3 6503 &prev_ps->tx_size_127, &cur_ps->tx_size_127);
fcea6f3d 6504
9e7a5d17 6505 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
36517fd3 6506 &prev_ps->tx_size_255, &cur_ps->tx_size_255);
fcea6f3d 6507
9e7a5d17 6508 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
36517fd3 6509 &prev_ps->tx_size_511, &cur_ps->tx_size_511);
fcea6f3d 6510
9e7a5d17 6511 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
fcea6f3d
AV
6512 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
6513
9e7a5d17 6514 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
fcea6f3d
AV
6515 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
6516
9e7a5d17 6517 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
fcea6f3d
AV
6518 &prev_ps->tx_size_big, &cur_ps->tx_size_big);
6519
4ab95646
HT
6520 fd_ctr_base = hw->fd_ctr_base;
6521
6522 ice_stat_update40(hw,
6523 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
6524 pf->stat_prev_loaded, &prev_ps->fd_sb_match,
6525 &cur_ps->fd_sb_match);
9e7a5d17 6526 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
fcea6f3d
AV
6527 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
6528
9e7a5d17 6529 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
fcea6f3d
AV
6530 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
6531
9e7a5d17 6532 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
fcea6f3d
AV
6533 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
6534
9e7a5d17 6535 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
fcea6f3d
AV
6536 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
6537
4b0fdceb
AV
6538 ice_update_dcb_stats(pf);
6539
9e7a5d17 6540 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
fcea6f3d
AV
6541 &prev_ps->crc_errors, &cur_ps->crc_errors);
6542
9e7a5d17 6543 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
fcea6f3d
AV
6544 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
6545
9e7a5d17 6546 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
fcea6f3d
AV
6547 &prev_ps->mac_local_faults,
6548 &cur_ps->mac_local_faults);
6549
9e7a5d17 6550 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
fcea6f3d
AV
6551 &prev_ps->mac_remote_faults,
6552 &cur_ps->mac_remote_faults);
6553
9e7a5d17 6554 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
fcea6f3d
AV
6555 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
6556
9e7a5d17 6557 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
fcea6f3d
AV
6558 &prev_ps->rx_undersize, &cur_ps->rx_undersize);
6559
9e7a5d17 6560 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
fcea6f3d
AV
6561 &prev_ps->rx_fragments, &cur_ps->rx_fragments);
6562
9e7a5d17 6563 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
fcea6f3d
AV
6564 &prev_ps->rx_oversize, &cur_ps->rx_oversize);
6565
9e7a5d17 6566 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
fcea6f3d
AV
6567 &prev_ps->rx_jabber, &cur_ps->rx_jabber);
6568
4ab95646
HT
6569 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
6570
fcea6f3d
AV
6571 pf->stat_prev_loaded = true;
6572}
6573
6574/**
6575 * ice_get_stats64 - get statistics for network device structure
6576 * @netdev: network interface device structure
6577 * @stats: main device statistics structure
6578 */
6579static
6580void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
6581{
6582 struct ice_netdev_priv *np = netdev_priv(netdev);
6583 struct rtnl_link_stats64 *vsi_stats;
6584 struct ice_vsi *vsi = np->vsi;
6585
6586 vsi_stats = &vsi->net_stats;
6587
3d57fd10 6588 if (!vsi->num_txq || !vsi->num_rxq)
fcea6f3d 6589 return;
3d57fd10 6590
fcea6f3d
AV
6591 /* netdev packet/byte stats come from ring counter. These are obtained
6592 * by summing up ring counters (done by ice_update_vsi_ring_stats).
3d57fd10
DE
6593 * But, only call the update routine and read the registers if VSI is
6594 * not down.
fcea6f3d 6595 */
e97fb1ae 6596 if (!test_bit(ICE_VSI_DOWN, vsi->state))
3d57fd10 6597 ice_update_vsi_ring_stats(vsi);
fcea6f3d
AV
6598 stats->tx_packets = vsi_stats->tx_packets;
6599 stats->tx_bytes = vsi_stats->tx_bytes;
6600 stats->rx_packets = vsi_stats->rx_packets;
6601 stats->rx_bytes = vsi_stats->rx_bytes;
6602
6603 /* The rest of the stats can be read from the hardware but instead we
6604 * just return values that the watchdog task has already obtained from
6605 * the hardware.
6606 */
6607 stats->multicast = vsi_stats->multicast;
6608 stats->tx_errors = vsi_stats->tx_errors;
6609 stats->tx_dropped = vsi_stats->tx_dropped;
6610 stats->rx_errors = vsi_stats->rx_errors;
6611 stats->rx_dropped = vsi_stats->rx_dropped;
6612 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6613 stats->rx_length_errors = vsi_stats->rx_length_errors;
6614}
6615
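/* Editorial sketch, not part of the driver: the per-ring summation that
 * ice_update_vsi_ring_stats() performs for the packet/byte counters above
 * looks roughly like the loop below. The field names (ring->syncp,
 * ring->stats.pkts/bytes) are assumptions for illustration; the real layout
 * lives in ice_txrx.h.
 */
static void example_sum_tx_ring_stats(struct ice_vsi *vsi, u64 *pkts, u64 *bytes)
{
	int i;

	*pkts = 0;
	*bytes = 0;
	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *ring = READ_ONCE(vsi->tx_rings[i]);
		unsigned int start;
		u64 p, b;

		if (!ring)
			continue;
		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			p = ring->stats.pkts;
			b = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));
		*pkts += p;
		*bytes += b;
	}
}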
2b245cb2
AV
6616/**
6617 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
6618 * @vsi: VSI having NAPI disabled
6619 */
6620static void ice_napi_disable_all(struct ice_vsi *vsi)
6621{
6622 int q_idx;
6623
6624 if (!vsi->netdev)
6625 return;
6626
0c2561c8 6627 ice_for_each_q_vector(vsi, q_idx) {
eec90376
YX
6628 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6629
e72bba21 6630 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
eec90376 6631 napi_disable(&q_vector->napi);
cdf1f1f1
JK
6632
6633 cancel_work_sync(&q_vector->tx.dim.work);
6634 cancel_work_sync(&q_vector->rx.dim.work);
eec90376 6635 }
2b245cb2
AV
6636}
6637
cdedef59
AV
6638/**
6639 * ice_down - Shutdown the connection
6640 * @vsi: The VSI being stopped
21c6e36b
JB
6641 *
6642 * Caller of this function is expected to set the ICE_DOWN bit in vsi->state
cdedef59 6643 */
fcea6f3d 6644int ice_down(struct ice_vsi *vsi)
cdedef59 6645{
c31af68a 6646 int i, tx_err, rx_err, link_err = 0, vlan_err = 0;
cdedef59 6647
21c6e36b
JB
6648 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
6649
b3be918d 6650 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
c31af68a 6651 vlan_err = ice_vsi_del_vlan_zero(vsi);
3a749623
JK
6652 if (!ice_is_e810(&vsi->back->hw))
6653 ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
cdedef59
AV
6654 netif_carrier_off(vsi->netdev);
6655 netif_tx_disable(vsi->netdev);
b3be918d
GN
6656 } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
6657 ice_eswitch_stop_all_tx_queues(vsi->back);
cdedef59
AV
6658 }
6659
6660 ice_vsi_dis_irq(vsi);
03f7a986
AV
6661
6662 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
72adf242 6663 if (tx_err)
19cce2c6 6664 netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n",
72adf242 6665 vsi->vsi_num, tx_err);
efc2214b
MF
6666 if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
6667 tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
6668 if (tx_err)
19cce2c6 6669 netdev_err(vsi->netdev, "Failed to stop XDP rings, VSI %d error %d\n",
efc2214b
MF
6670 vsi->vsi_num, tx_err);
6671 }
72adf242 6672
13a6233b 6673 rx_err = ice_vsi_stop_all_rx_rings(vsi);
72adf242 6674 if (rx_err)
19cce2c6 6675 netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n",
72adf242
AV
6676 vsi->vsi_num, rx_err);
6677
2b245cb2 6678 ice_napi_disable_all(vsi);
cdedef59 6679
ab4ab73f
BA
6680 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
6681 link_err = ice_force_phys_link_state(vsi, false);
6682 if (link_err)
19cce2c6 6683 netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
ab4ab73f
BA
6684 vsi->vsi_num, link_err);
6685 }
b6f934f0 6686
cdedef59
AV
6687 ice_for_each_txq(vsi, i)
6688 ice_clean_tx_ring(vsi->tx_rings[i]);
6689
6690 ice_for_each_rxq(vsi, i)
6691 ice_clean_rx_ring(vsi->rx_rings[i]);
6692
c31af68a 6693 if (tx_err || rx_err || link_err || vlan_err) {
19cce2c6 6694 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
cdedef59 6695 vsi->vsi_num, vsi->vsw->sw_id);
72adf242
AV
6696 return -EIO;
6697 }
6698
6699 return 0;
cdedef59
AV
6700}
6701
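/* Editorial sketch, not part of the driver: ice_down() expects its caller to
 * have marked the VSI down first, which is what the WARN_ON() above checks.
 * A typical close-style caller (helper name is hypothetical) therefore looks
 * roughly like this, matching the test_and_set_bit() pattern used elsewhere
 * in this file:
 */
static int example_close_vsi(struct ice_vsi *vsi)
{
	int err = 0;

	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
		err = ice_down(vsi);

	return err;
}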
6702/**
6703 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6704 * @vsi: VSI having resources allocated
6705 *
6706 * Return 0 on success, negative on failure
6707 */
0e674aeb 6708int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
cdedef59 6709{
dab0588f 6710 int i, err = 0;
cdedef59
AV
6711
6712 if (!vsi->num_txq) {
9a946843 6713 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
cdedef59
AV
6714 vsi->vsi_num);
6715 return -EINVAL;
6716 }
6717
6718 ice_for_each_txq(vsi, i) {
e72bba21 6719 struct ice_tx_ring *ring = vsi->tx_rings[i];
eb0ee8ab
MS
6720
6721 if (!ring)
6722 return -EINVAL;
6723
1c54c839
GN
6724 if (vsi->netdev)
6725 ring->netdev = vsi->netdev;
eb0ee8ab 6726 err = ice_setup_tx_ring(ring);
cdedef59
AV
6727 if (err)
6728 break;
6729 }
6730
6731 return err;
6732}
6733
6734/**
6735 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
6736 * @vsi: VSI having resources allocated
6737 *
6738 * Return 0 on success, negative on failure
6739 */
0e674aeb 6740int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
cdedef59 6741{
dab0588f 6742 int i, err = 0;
cdedef59
AV
6743
6744 if (!vsi->num_rxq) {
9a946843 6745 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
cdedef59
AV
6746 vsi->vsi_num);
6747 return -EINVAL;
6748 }
6749
6750 ice_for_each_rxq(vsi, i) {
e72bba21 6751 struct ice_rx_ring *ring = vsi->rx_rings[i];
eb0ee8ab
MS
6752
6753 if (!ring)
6754 return -EINVAL;
6755
1c54c839
GN
6756 if (vsi->netdev)
6757 ring->netdev = vsi->netdev;
eb0ee8ab 6758 err = ice_setup_rx_ring(ring);
cdedef59
AV
6759 if (err)
6760 break;
6761 }
6762
6763 return err;
6764}
6765
148beb61
HT
6766/**
6767 * ice_vsi_open_ctrl - open control VSI for use
6768 * @vsi: the VSI to open
6769 *
6770 * Initialization of the Control VSI
6771 *
6772 * Returns 0 on success, negative value on error
6773 */
6774int ice_vsi_open_ctrl(struct ice_vsi *vsi)
6775{
6776 char int_name[ICE_INT_NAME_STR_LEN];
6777 struct ice_pf *pf = vsi->back;
6778 struct device *dev;
6779 int err;
6780
6781 dev = ice_pf_to_dev(pf);
6782 /* allocate descriptors */
6783 err = ice_vsi_setup_tx_rings(vsi);
6784 if (err)
6785 goto err_setup_tx;
6786
6787 err = ice_vsi_setup_rx_rings(vsi);
6788 if (err)
6789 goto err_setup_rx;
6790
6791 err = ice_vsi_cfg(vsi);
6792 if (err)
6793 goto err_setup_rx;
6794
6795 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
6796 dev_driver_string(dev), dev_name(dev));
6797 err = ice_vsi_req_irq_msix(vsi, int_name);
6798 if (err)
6799 goto err_setup_rx;
6800
6801 ice_vsi_cfg_msix(vsi);
6802
6803 err = ice_vsi_start_all_rx_rings(vsi);
6804 if (err)
6805 goto err_up_complete;
6806
e97fb1ae 6807 clear_bit(ICE_VSI_DOWN, vsi->state);
148beb61
HT
6808 ice_vsi_ena_irq(vsi);
6809
6810 return 0;
6811
6812err_up_complete:
6813 ice_down(vsi);
6814err_setup_rx:
6815 ice_vsi_free_rx_rings(vsi);
6816err_setup_tx:
6817 ice_vsi_free_tx_rings(vsi);
6818
6819 return err;
6820}
6821
cdedef59
AV
6822/**
6823 * ice_vsi_open - Called when a network interface is made active
6824 * @vsi: the VSI to open
6825 *
6826 * Initialization of the VSI
6827 *
6828 * Returns 0 on success, negative value on error
6829 */
1a1c40df 6830int ice_vsi_open(struct ice_vsi *vsi)
cdedef59
AV
6831{
6832 char int_name[ICE_INT_NAME_STR_LEN];
6833 struct ice_pf *pf = vsi->back;
6834 int err;
6835
6836 /* allocate descriptors */
6837 err = ice_vsi_setup_tx_rings(vsi);
6838 if (err)
6839 goto err_setup_tx;
6840
6841 err = ice_vsi_setup_rx_rings(vsi);
6842 if (err)
6843 goto err_setup_rx;
6844
6845 err = ice_vsi_cfg(vsi);
6846 if (err)
6847 goto err_setup_rx;
6848
6849 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
4015d11e 6850 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
ba880734 6851 err = ice_vsi_req_irq_msix(vsi, int_name);
cdedef59
AV
6852 if (err)
6853 goto err_setup_rx;
6854
1a1c40df
GN
6855 if (vsi->type == ICE_VSI_PF) {
6856 /* Notify the stack of the actual queue counts. */
6857 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
6858 if (err)
6859 goto err_set_qs;
cdedef59 6860
1a1c40df
GN
6861 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
6862 if (err)
6863 goto err_set_qs;
6864 }
cdedef59
AV
6865
6866 err = ice_up_complete(vsi);
6867 if (err)
6868 goto err_up_complete;
6869
6870 return 0;
6871
6872err_up_complete:
6873 ice_down(vsi);
6874err_set_qs:
6875 ice_vsi_free_irq(vsi);
6876err_setup_rx:
6877 ice_vsi_free_rx_rings(vsi);
6878err_setup_tx:
6879 ice_vsi_free_tx_rings(vsi);
6880
6881 return err;
6882}
6883
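/* Editorial sketch, not part of the driver: an ndo_open-style caller of
 * ice_vsi_open() (function name hypothetical). The networking core already
 * holds rtnl_lock when .ndo_open runs, which is what the
 * netif_set_real_num_{tx,rx}_queues() calls above expect.
 */
static int example_ndo_open(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int err;

	err = ice_vsi_open(vsi);
	if (err)
		netdev_err(netdev, "Failed to open VSI %d, error %d\n",
			   vsi->vsi_num, err);

	return err;
}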
0f9d5027
AV
6884/**
6885 * ice_vsi_release_all - Delete all VSIs
6886 * @pf: PF from which all VSIs are being removed
6887 */
6888static void ice_vsi_release_all(struct ice_pf *pf)
6889{
6890 int err, i;
6891
6892 if (!pf->vsi)
6893 return;
6894
80ed404a 6895 ice_for_each_vsi(pf, i) {
0f9d5027
AV
6896 if (!pf->vsi[i])
6897 continue;
6898
fbc7b27a
KP
6899 if (pf->vsi[i]->type == ICE_VSI_CHNL)
6900 continue;
6901
0f9d5027
AV
6902 err = ice_vsi_release(pf->vsi[i]);
6903 if (err)
19cce2c6 6904 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
0f9d5027
AV
6905 i, err, pf->vsi[i]->vsi_num);
6906 }
6907}
6908
0f9d5027 6909/**
462acf6a
TN
6910 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
6911 * @pf: pointer to the PF instance
6912 * @type: VSI type to rebuild
6913 *
6914 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
0f9d5027 6915 */
462acf6a 6916static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
0f9d5027 6917{
4015d11e 6918 struct device *dev = ice_pf_to_dev(pf);
462acf6a 6919 int i, err;
0f9d5027 6920
80ed404a 6921 ice_for_each_vsi(pf, i) {
4425e053 6922 struct ice_vsi *vsi = pf->vsi[i];
0f9d5027 6923
462acf6a 6924 if (!vsi || vsi->type != type)
0f9d5027
AV
6925 continue;
6926
462acf6a 6927 /* rebuild the VSI */
87324e74 6928 err = ice_vsi_rebuild(vsi, true);
0f9d5027 6929 if (err) {
19cce2c6 6930 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
964674f1 6931 err, vsi->idx, ice_vsi_type_str(type));
0f9d5027
AV
6932 return err;
6933 }
6934
462acf6a 6935 /* replay filters for the VSI */
2ccc1c1c
TN
6936 err = ice_replay_vsi(&pf->hw, vsi->idx);
6937 if (err) {
5f87ec48 6938 dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
2ccc1c1c 6939 err, vsi->idx, ice_vsi_type_str(type));
c1484691 6940 return err;
462acf6a
TN
6941 }
6942
6943 /* Re-map HW VSI number, using VSI handle that has been
6944 * previously validated in ice_replay_vsi() call above
6945 */
6946 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
6947
6948 /* enable the VSI */
6949 err = ice_ena_vsi(vsi, false);
6950 if (err) {
19cce2c6 6951 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
964674f1 6952 err, vsi->idx, ice_vsi_type_str(type));
462acf6a
TN
6953 return err;
6954 }
6955
4015d11e
BC
6956 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
6957 ice_vsi_type_str(type));
0f9d5027
AV
6958 }
6959
6960 return 0;
0b28b702
AV
6961}
6962
334cb062 6963/**
462acf6a
TN
6964 * ice_update_pf_netdev_link - Update PF netdev link status
6965 * @pf: pointer to the PF instance
334cb062 6966 */
462acf6a 6967static void ice_update_pf_netdev_link(struct ice_pf *pf)
334cb062 6968{
462acf6a 6969 bool link_up;
334cb062
AV
6970 int i;
6971
80ed404a 6972 ice_for_each_vsi(pf, i) {
4425e053
KK
6973 struct ice_vsi *vsi = pf->vsi[i];
6974
462acf6a
TN
6975 if (!vsi || vsi->type != ICE_VSI_PF)
6976 return;
334cb062 6977
462acf6a
TN
6978 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
6979 if (link_up) {
6980 netif_carrier_on(pf->vsi[i]->netdev);
6981 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
6982 } else {
6983 netif_carrier_off(pf->vsi[i]->netdev);
6984 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
334cb062 6985 }
334cb062 6986 }
334cb062
AV
6987}
6988
0b28b702
AV
6989/**
6990 * ice_rebuild - rebuild after reset
2f2da36e 6991 * @pf: PF to rebuild
462acf6a 6992 * @reset_type: type of reset
12bb018c
BC
6993 *
6994 * Do not rebuild VF VSI in this flow because that is already handled via
6995 * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
6996 * PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we don't want
6997 * to reset/rebuild all the VF VSI twice.
0b28b702 6998 */
462acf6a 6999static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
0b28b702 7000{
4015d11e 7001 struct device *dev = ice_pf_to_dev(pf);
0b28b702 7002 struct ice_hw *hw = &pf->hw;
a1ffafb0 7003 bool dvm;
462acf6a 7004 int err;
0b28b702 7005
7e408e07 7006 if (test_bit(ICE_DOWN, pf->state))
0b28b702
AV
7007 goto clear_recovery;
7008
462acf6a 7009 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
0b28b702 7010
b537752e 7011#define ICE_EMP_RESET_SLEEP_MS 5000
399e27db
JK
7012 if (reset_type == ICE_RESET_EMPR) {
7013 /* If an EMP reset has occurred, any previously pending flash
7014 * update will have completed. We no longer know whether or
7015 * not the NVM update EMP reset is restricted.
7016 */
7017 pf->fw_emp_reset_disabled = false;
b537752e
PO
7018
7019 msleep(ICE_EMP_RESET_SLEEP_MS);
399e27db
JK
7020 }
7021
2ccc1c1c
TN
7022 err = ice_init_all_ctrlq(hw);
7023 if (err) {
7024 dev_err(dev, "control queues init failed %d\n", err);
0f9d5027 7025 goto err_init_ctrlq;
0b28b702
AV
7026 }
7027
462acf6a
TN
7028 /* if DDP was previously loaded successfully */
7029 if (!ice_is_safe_mode(pf)) {
7030 /* reload the SW DB of filter tables */
7031 if (reset_type == ICE_RESET_PFR)
7032 ice_fill_blk_tbls(hw);
7033 else
7034 /* Reload DDP Package after CORER/GLOBR reset */
7035 ice_load_pkg(NULL, pf);
7036 }
7037
2ccc1c1c
TN
7038 err = ice_clear_pf_cfg(hw);
7039 if (err) {
7040 dev_err(dev, "clear PF configuration failed %d\n", err);
0f9d5027 7041 goto err_init_ctrlq;
0b28b702
AV
7042 }
7043
7044 ice_clear_pxe_mode(hw);
7045
2ccc1c1c
TN
7046 err = ice_init_nvm(hw);
7047 if (err) {
7048 dev_err(dev, "ice_init_nvm failed %d\n", err);
97a4ec01
JK
7049 goto err_init_ctrlq;
7050 }
7051
2ccc1c1c
TN
7052 err = ice_get_caps(hw);
7053 if (err) {
7054 dev_err(dev, "ice_get_caps failed %d\n", err);
0f9d5027 7055 goto err_init_ctrlq;
0b28b702
AV
7056 }
7057
2ccc1c1c
TN
7058 err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
7059 if (err) {
7060 dev_err(dev, "set_mac_cfg failed %d\n", err);
42449105
AV
7061 goto err_init_ctrlq;
7062 }
7063
a1ffafb0
BC
7064 dvm = ice_is_dvm_ena(hw);
7065
7066 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7067 if (err)
7068 goto err_init_ctrlq;
7069
0f9d5027
AV
7070 err = ice_sched_init_port(hw->port_info);
7071 if (err)
7072 goto err_sched_init_port;
7073
0b28b702 7074 /* start misc vector */
ba880734
BC
7075 err = ice_req_irq_msix_misc(pf);
7076 if (err) {
7077 dev_err(dev, "misc vector setup failed: %d\n", err);
462acf6a 7078 goto err_sched_init_port;
0b28b702
AV
7079 }
7080
83af0039
HT
7081 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7082 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
7083 if (!rd32(hw, PFQF_FD_SIZE)) {
7084 u16 unused, guar, b_effort;
7085
7086 guar = hw->func_caps.fd_fltr_guar;
7087 b_effort = hw->func_caps.fd_fltr_best_effort;
7088
7089 /* force guaranteed filter pool for PF */
7090 ice_alloc_fd_guar_item(hw, &unused, guar);
7091 /* force shared filter pool for PF */
7092 ice_alloc_fd_shrd_item(hw, &unused, b_effort);
7093 }
7094 }
7095
462acf6a
TN
7096 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7097 ice_dcb_rebuild(pf);
7098
06c16d89
JK
7099 /* If the PF previously had enabled PTP, PTP init needs to happen before
7100 * the VSI rebuild. If not, this causes the PTP link status events to
7101 * fail.
7102 */
7103 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
48096710 7104 ice_ptp_reset(pf);
06c16d89 7105
43113ff7
KK
7106 if (ice_is_feature_supported(pf, ICE_F_GNSS))
7107 ice_gnss_init(pf);
7108
462acf6a
TN
7109 /* rebuild PF VSI */
7110 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
0f9d5027 7111 if (err) {
462acf6a 7112 dev_err(dev, "PF VSI rebuild failed: %d\n", err);
0f9d5027
AV
7113 goto err_vsi_rebuild;
7114 }
0b28b702 7115
48096710
KK
7116 /* configure PTP timestamping after VSI rebuild */
7117 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7118 ice_ptp_cfg_timestamp(pf, false);
7119
b3be918d
GN
7120 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
7121 if (err) {
7122 dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
7123 goto err_vsi_rebuild;
7124 }
7125
fbc7b27a
KP
7126 if (reset_type == ICE_RESET_PFR) {
7127 err = ice_rebuild_channels(pf);
7128 if (err) {
7129 dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
7130 err);
7131 goto err_vsi_rebuild;
7132 }
7133 }
7134
83af0039
HT
7135 /* If Flow Director is active */
7136 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7137 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7138 if (err) {
7139 dev_err(dev, "control VSI rebuild failed: %d\n", err);
7140 goto err_vsi_rebuild;
7141 }
7142
7143 /* replay HW Flow Director recipes */
7144 if (hw->fdir_prof)
7145 ice_fdir_replay_flows(hw);
7146
7147 /* replay Flow Director filters */
7148 ice_fdir_replay_fltrs(pf);
28bf2672
BC
7149
7150 ice_rebuild_arfs(pf);
83af0039
HT
7151 }
7152
462acf6a
TN
7153 ice_update_pf_netdev_link(pf);
7154
7155 /* tell the firmware we are up */
2ccc1c1c
TN
7156 err = ice_send_version(pf);
7157 if (err) {
5f87ec48 7158 dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
2ccc1c1c 7159 err);
462acf6a
TN
7160 goto err_vsi_rebuild;
7161 }
7162
7163 ice_replay_post(hw);
7164
0f9d5027 7165 /* if we get here, reset flow is successful */
7e408e07 7166 clear_bit(ICE_RESET_FAILED, pf->state);
f9f5301e
DE
7167
7168 ice_plug_aux_dev(pf);
0b28b702
AV
7169 return;
7170
0f9d5027 7171err_vsi_rebuild:
0f9d5027
AV
7172err_sched_init_port:
7173 ice_sched_cleanup_all(hw);
7174err_init_ctrlq:
0b28b702 7175 ice_shutdown_all_ctrlq(hw);
7e408e07 7176 set_bit(ICE_RESET_FAILED, pf->state);
0b28b702 7177clear_recovery:
0f9d5027 7178 /* set this bit in PF state to control service task scheduling */
7e408e07 7179 set_bit(ICE_NEEDS_RESTART, pf->state);
0f9d5027 7180 dev_err(dev, "Rebuild failed, unload and reload driver\n");
0b28b702
AV
7181}
7182
23b44513
MF
7183/**
7184 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
7185 * @vsi: Pointer to VSI structure
7186 */
7187static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
7188{
7189 if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
7190 return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
7191 else
7192 return ICE_RXBUF_3072;
7193}
7194
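/* Editorial note, not part of the driver: the MTU ceiling enforced for XDP in
 * ice_change_mtu() below follows directly from the frame size returned above.
 * Assuming the usual constants (XDP_PACKET_HEADROOM = 256 and
 * ICE_ETH_PKT_HDR_PAD = ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN = 26 bytes),
 * the limits work out roughly as:
 *
 *   4K pages, legacy-rx off:   3072 - 26       = 3046 bytes max MTU
 *   >= 8K pages or legacy-rx:  2048 - 256 - 26 = 1766 bytes max MTU
 *
 * i.e. new_mtu must satisfy new_mtu + ICE_ETH_PKT_HDR_PAD <= frame_size.
 */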
e94d4478
AV
7195/**
7196 * ice_change_mtu - NDO callback to change the MTU
7197 * @netdev: network interface device structure
7198 * @new_mtu: new value for maximum frame size
7199 *
7200 * Returns 0 on success, negative on failure
7201 */
7202static int ice_change_mtu(struct net_device *netdev, int new_mtu)
7203{
7204 struct ice_netdev_priv *np = netdev_priv(netdev);
7205 struct ice_vsi *vsi = np->vsi;
7206 struct ice_pf *pf = vsi->back;
7207 u8 count = 0;
348048e7 7208 int err = 0;
e94d4478 7209
22bef5e7 7210 if (new_mtu == (int)netdev->mtu) {
2f2da36e 7211 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
e94d4478
AV
7212 return 0;
7213 }
7214
efc2214b 7215 if (ice_is_xdp_ena_vsi(vsi)) {
23b44513 7216 int frame_size = ice_max_xdp_frame_size(vsi);
efc2214b
MF
7217
7218 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7219 netdev_err(netdev, "max MTU for XDP usage is %d\n",
23b44513 7220 frame_size - ICE_ETH_PKT_HDR_PAD);
efc2214b
MF
7221 return -EINVAL;
7222 }
7223 }
7224
e94d4478
AV
7225 /* if a reset is in progress, wait for some time for it to complete */
7226 do {
5df7e45d 7227 if (ice_is_reset_in_progress(pf->state)) {
e94d4478
AV
7228 count++;
7229 usleep_range(1000, 2000);
7230 } else {
7231 break;
7232 }
7233
7234 } while (count < 100);
7235
7236 if (count == 100) {
2f2da36e 7237 netdev_err(netdev, "can't change MTU. Device is busy\n");
e94d4478
AV
7238 return -EBUSY;
7239 }
7240
22bef5e7 7241 netdev->mtu = (unsigned int)new_mtu;
e94d4478
AV
7242
7243 /* if VSI is up, bring it down and then back up */
e97fb1ae 7244 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
e94d4478
AV
7245 err = ice_down(vsi);
7246 if (err) {
fe6cd890 7247 netdev_err(netdev, "change MTU if_down err %d\n", err);
97b01291 7248 return err;
e94d4478
AV
7249 }
7250
7251 err = ice_up(vsi);
7252 if (err) {
2f2da36e 7253 netdev_err(netdev, "change MTU if_up err %d\n", err);
97b01291 7254 return err;
e94d4478
AV
7255 }
7256 }
7257
bda5b7db 7258 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
97b01291 7259 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
348048e7
DE
7260
7261 return err;
e94d4478
AV
7262}
7263
77a78115 7264/**
a7605370 7265 * ice_eth_ioctl - Access the hwtstamp interface
77a78115
JK
7266 * @netdev: network interface device structure
7267 * @ifr: interface request data
7268 * @cmd: ioctl command
7269 */
a7605370 7270static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
77a78115
JK
7271{
7272 struct ice_netdev_priv *np = netdev_priv(netdev);
7273 struct ice_pf *pf = np->vsi->back;
7274
7275 switch (cmd) {
7276 case SIOCGHWTSTAMP:
7277 return ice_ptp_get_ts_config(pf, ifr);
7278 case SIOCSHWTSTAMP:
7279 return ice_ptp_set_ts_config(pf, ifr);
7280 default:
7281 return -EOPNOTSUPP;
7282 }
7283}
7284
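/* Editorial sketch, not part of the driver: the SIOC[GS]HWTSTAMP commands
 * routed above are issued from user space through an ifreq whose ifr_data
 * points at a struct hwtstamp_config (standard <linux/net_tstamp.h> UAPI).
 * A minimal user-space requester, for illustration only (pass any open
 * AF_INET datagram socket as @sock), could look like:
 */
#if 0	/* user-space example, not kernel code */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int example_enable_hwtstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}
#endif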
0fee3577
LY
7285/**
7286 * ice_aq_str - convert AQ err code to a string
7287 * @aq_err: the AQ error code to convert
7288 */
7289const char *ice_aq_str(enum ice_aq_err aq_err)
7290{
7291 switch (aq_err) {
7292 case ICE_AQ_RC_OK:
7293 return "OK";
7294 case ICE_AQ_RC_EPERM:
7295 return "ICE_AQ_RC_EPERM";
7296 case ICE_AQ_RC_ENOENT:
7297 return "ICE_AQ_RC_ENOENT";
7298 case ICE_AQ_RC_ENOMEM:
7299 return "ICE_AQ_RC_ENOMEM";
7300 case ICE_AQ_RC_EBUSY:
7301 return "ICE_AQ_RC_EBUSY";
7302 case ICE_AQ_RC_EEXIST:
7303 return "ICE_AQ_RC_EEXIST";
7304 case ICE_AQ_RC_EINVAL:
7305 return "ICE_AQ_RC_EINVAL";
7306 case ICE_AQ_RC_ENOSPC:
7307 return "ICE_AQ_RC_ENOSPC";
7308 case ICE_AQ_RC_ENOSYS:
7309 return "ICE_AQ_RC_ENOSYS";
b5e19a64
CC
7310 case ICE_AQ_RC_EMODE:
7311 return "ICE_AQ_RC_EMODE";
0fee3577
LY
7312 case ICE_AQ_RC_ENOSEC:
7313 return "ICE_AQ_RC_ENOSEC";
7314 case ICE_AQ_RC_EBADSIG:
7315 return "ICE_AQ_RC_EBADSIG";
7316 case ICE_AQ_RC_ESVN:
7317 return "ICE_AQ_RC_ESVN";
7318 case ICE_AQ_RC_EBADMAN:
7319 return "ICE_AQ_RC_EBADMAN";
7320 case ICE_AQ_RC_EBADBUF:
7321 return "ICE_AQ_RC_EBADBUF";
7322 }
7323
7324 return "ICE_AQ_RC_UNKNOWN";
7325}
7326
d76a60ba 7327/**
b66a972a 7328 * ice_set_rss_lut - Set RSS LUT
d76a60ba 7329 * @vsi: Pointer to VSI structure
d76a60ba
AV
7330 * @lut: Lookup table
7331 * @lut_size: Lookup table size
7332 *
7333 * Returns 0 on success, negative on failure
7334 */
b66a972a 7335int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
d76a60ba 7336{
b66a972a
BC
7337 struct ice_aq_get_set_rss_lut_params params = {};
7338 struct ice_hw *hw = &vsi->back->hw;
5e24d598 7339 int status;
d76a60ba 7340
b66a972a
BC
7341 if (!lut)
7342 return -EINVAL;
d76a60ba 7343
b66a972a
BC
7344 params.vsi_handle = vsi->idx;
7345 params.lut_size = lut_size;
7346 params.lut_type = vsi->rss_lut_type;
7347 params.lut = lut;
d76a60ba 7348
b66a972a 7349 status = ice_aq_set_rss_lut(hw, &params);
c1484691 7350 if (status)
5f87ec48 7351 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
5518ac2a 7352 status, ice_aq_str(hw->adminq.sq_last_status));
d76a60ba 7353
c1484691 7354 return status;
b66a972a 7355}
e3c53928 7356
b66a972a
BC
7357/**
7358 * ice_set_rss_key - Set RSS key
7359 * @vsi: Pointer to the VSI structure
7360 * @seed: RSS hash seed
7361 *
7362 * Returns 0 on success, negative on failure
7363 */
7364int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7365{
7366 struct ice_hw *hw = &vsi->back->hw;
5e24d598 7367 int status;
b66a972a
BC
7368
7369 if (!seed)
7370 return -EINVAL;
7371
7372 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
c1484691 7373 if (status)
5f87ec48 7374 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
5518ac2a 7375 status, ice_aq_str(hw->adminq.sq_last_status));
d76a60ba 7376
c1484691 7377 return status;
d76a60ba
AV
7378}
7379
7380/**
b66a972a 7381 * ice_get_rss_lut - Get RSS LUT
d76a60ba 7382 * @vsi: Pointer to VSI structure
d76a60ba
AV
7383 * @lut: Buffer to store the lookup table entries
7384 * @lut_size: Size of buffer to store the lookup table entries
7385 *
7386 * Returns 0 on success, negative on failure
7387 */
b66a972a 7388int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
d76a60ba 7389{
b66a972a
BC
7390 struct ice_aq_get_set_rss_lut_params params = {};
7391 struct ice_hw *hw = &vsi->back->hw;
5e24d598 7392 int status;
d76a60ba 7393
b66a972a
BC
7394 if (!lut)
7395 return -EINVAL;
d76a60ba 7396
b66a972a
BC
7397 params.vsi_handle = vsi->idx;
7398 params.lut_size = lut_size;
7399 params.lut_type = vsi->rss_lut_type;
7400 params.lut = lut;
7401
7402 status = ice_aq_get_rss_lut(hw, &params);
c1484691 7403 if (status)
5f87ec48 7404 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
5518ac2a 7405 status, ice_aq_str(hw->adminq.sq_last_status));
d76a60ba 7406
c1484691 7407 return status;
b66a972a 7408}
e3c53928 7409
b66a972a
BC
7410/**
7411 * ice_get_rss_key - Get RSS key
7412 * @vsi: Pointer to VSI structure
7413 * @seed: Buffer to store the key in
7414 *
7415 * Returns 0 on success, negative on failure
7416 */
7417int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7418{
7419 struct ice_hw *hw = &vsi->back->hw;
5e24d598 7420 int status;
b66a972a
BC
7421
7422 if (!seed)
7423 return -EINVAL;
7424
7425 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
c1484691 7426 if (status)
5f87ec48 7427 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
5518ac2a 7428 status, ice_aq_str(hw->adminq.sq_last_status));
d76a60ba 7429
c1484691 7430 return status;
d76a60ba
AV
7431}
7432
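/* Editorial sketch, not part of the driver: a typical consumer of the RSS
 * helpers above (an ethtool -X style reconfiguration) allocates a LUT of
 * vsi->rss_table_size entries, spreads them round-robin over vsi->rss_size
 * queues and pushes it with ice_set_rss_lut(). The VSI field names are
 * assumptions for illustration.
 */
static int example_cfg_default_rss_lut(struct ice_vsi *vsi)
{
	u8 *lut;
	int err;
	u16 i;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	for (i = 0; i < vsi->rss_table_size; i++)
		lut[i] = i % vsi->rss_size;

	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
	kfree(lut);

	return err;
}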
b1edc14a
MFIP
7433/**
7434 * ice_bridge_getlink - Get the hardware bridge mode
7435 * @skb: skb buff
f9867df6 7436 * @pid: process ID
b1edc14a
MFIP
7437 * @seq: RTNL message seq
7438 * @dev: the netdev being configured
7439 * @filter_mask: filter mask passed in
7440 * @nlflags: netlink flags passed in
7441 *
7442 * Return the bridge mode (VEB/VEPA)
7443 */
7444static int
7445ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7446 struct net_device *dev, u32 filter_mask, int nlflags)
7447{
7448 struct ice_netdev_priv *np = netdev_priv(dev);
7449 struct ice_vsi *vsi = np->vsi;
7450 struct ice_pf *pf = vsi->back;
7451 u16 bmode;
7452
7453 bmode = pf->first_sw->bridge_mode;
7454
7455 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
7456 filter_mask, NULL);
7457}
7458
7459/**
7460 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
7461 * @vsi: Pointer to VSI structure
7462 * @bmode: Hardware bridge mode (VEB/VEPA)
7463 *
7464 * Returns 0 on success, negative on failure
7465 */
7466static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
7467{
b1edc14a
MFIP
7468 struct ice_aqc_vsi_props *vsi_props;
7469 struct ice_hw *hw = &vsi->back->hw;
198a666a 7470 struct ice_vsi_ctx *ctxt;
2ccc1c1c 7471 int ret;
b1edc14a
MFIP
7472
7473 vsi_props = &vsi->info;
198a666a 7474
9efe35d0 7475 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
198a666a
BA
7476 if (!ctxt)
7477 return -ENOMEM;
7478
7479 ctxt->info = vsi->info;
b1edc14a
MFIP
7480
7481 if (bmode == BRIDGE_MODE_VEB)
7482 /* change from VEPA to VEB mode */
198a666a 7483 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
b1edc14a
MFIP
7484 else
7485 /* change from VEB to VEPA mode */
198a666a
BA
7486 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7487 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
5726ca0e 7488
2ccc1c1c
TN
7489 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
7490 if (ret) {
5f87ec48 7491 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
2ccc1c1c 7492 bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
198a666a 7493 goto out;
b1edc14a
MFIP
7494 }
7495 /* Update sw flags for bookkeeping */
198a666a 7496 vsi_props->sw_flags = ctxt->info.sw_flags;
b1edc14a 7497
198a666a 7498out:
9efe35d0 7499 kfree(ctxt);
198a666a 7500 return ret;
b1edc14a
MFIP
7501}
7502
7503/**
7504 * ice_bridge_setlink - Set the hardware bridge mode
7505 * @dev: the netdev being configured
7506 * @nlh: RTNL message
7507 * @flags: bridge setlink flags
2fd527b7 7508 * @extack: netlink extended ack
b1edc14a
MFIP
7509 *
7510 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
7511 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
7512 * not already set) for all VSIs connected to this switch. It also updates the
7513 * unicast switch filter rules for the corresponding switch of the netdev.
7514 */
7515static int
7516ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
3d505147
BA
7517 u16 __always_unused flags,
7518 struct netlink_ext_ack __always_unused *extack)
b1edc14a
MFIP
7519{
7520 struct ice_netdev_priv *np = netdev_priv(dev);
7521 struct ice_pf *pf = np->vsi->back;
7522 struct nlattr *attr, *br_spec;
7523 struct ice_hw *hw = &pf->hw;
b1edc14a
MFIP
7524 struct ice_sw *pf_sw;
7525 int rem, v, err = 0;
7526
7527 pf_sw = pf->first_sw;
7528 /* find the attribute in the netlink message */
7529 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7530
7531 nla_for_each_nested(attr, br_spec, rem) {
7532 __u16 mode;
7533
7534 if (nla_type(attr) != IFLA_BRIDGE_MODE)
7535 continue;
7536 mode = nla_get_u16(attr);
7537 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
7538 return -EINVAL;
7539 /* Continue if bridge mode is not being flipped */
7540 if (mode == pf_sw->bridge_mode)
7541 continue;
7542 /* Iterates through the PF VSI list and update the loopback
7543 * mode of the VSI
7544 */
7545 ice_for_each_vsi(pf, v) {
7546 if (!pf->vsi[v])
7547 continue;
7548 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
7549 if (err)
7550 return err;
7551 }
7552
7553 hw->evb_veb = (mode == BRIDGE_MODE_VEB);
7554 /* Update the unicast switch filter rules for the corresponding
7555 * switch of the netdev
7556 */
2ccc1c1c
TN
7557 err = ice_update_sw_rule_bridge_mode(hw);
7558 if (err) {
5f87ec48 7559 netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
2ccc1c1c 7560 mode, err,
0fee3577 7561 ice_aq_str(hw->adminq.sq_last_status));
b1edc14a
MFIP
7562 /* revert hw->evb_veb */
7563 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
c1484691 7564 return err;
b1edc14a
MFIP
7565 }
7566
7567 pf_sw->bridge_mode = mode;
7568 }
7569
7570 return 0;
7571}
7572
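/* Editorial sketch, not part of the driver: ice_bridge_setlink() above walks
 * an IFLA_AF_SPEC nest that carries an IFLA_BRIDGE_MODE u16 (as produced by,
 * e.g., "bridge link set dev <pf-netdev> hwmode veb"). Expressed with the
 * kernel's netlink helpers, that attribute layout is assembled roughly like
 * this (function name hypothetical):
 */
static int example_put_bridge_mode(struct sk_buff *skb, u16 mode)
{
	struct nlattr *af_spec;

	af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
	if (!af_spec)
		return -EMSGSIZE;

	if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
		nla_nest_cancel(skb, af_spec);
		return -EMSGSIZE;
	}

	nla_nest_end(skb, af_spec);

	return 0;
}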
b3969fd7
SM
7573/**
7574 * ice_tx_timeout - Respond to a Tx Hang
7575 * @netdev: network interface device structure
644f40ea 7576 * @txqueue: Tx queue
b3969fd7 7577 */
0290bd29 7578static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
b3969fd7
SM
7579{
7580 struct ice_netdev_priv *np = netdev_priv(netdev);
e72bba21 7581 struct ice_tx_ring *tx_ring = NULL;
b3969fd7
SM
7582 struct ice_vsi *vsi = np->vsi;
7583 struct ice_pf *pf = vsi->back;
807bc98d 7584 u32 i;
b3969fd7
SM
7585
7586 pf->tx_timeout_count++;
7587
610ed0e9
AJ
7588 /* Check if PFC is enabled for the TC to which the queue belongs.
7589 * If so, the Tx timeout is not caused by a hung queue and there is no
7590 * need to reset and rebuild
7591 */
7592 if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
7593 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
7594 txqueue);
7595 return;
7596 }
7597
ed5a3f66 7598 /* now that we have an index, find the tx_ring struct */
2faf63b6 7599 ice_for_each_txq(vsi, i)
ed5a3f66
JF
7600 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
7601 if (txqueue == vsi->tx_rings[i]->q_index) {
7602 tx_ring = vsi->tx_rings[i];
7603 break;
7604 }
b3969fd7
SM
7605
7606 /* Reset recovery level if enough time has elapsed after last timeout.
7607 * Also ensure no new reset action happens before next timeout period.
7608 */
7609 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
7610 pf->tx_timeout_recovery_level = 1;
7611 else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
7612 netdev->watchdog_timeo)))
7613 return;
7614
7615 if (tx_ring) {
807bc98d
BC
7616 struct ice_hw *hw = &pf->hw;
7617 u32 head, val = 0;
7618
ed5a3f66 7619 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
807bc98d 7620 QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
b3969fd7 7621 /* Read interrupt register */
ba880734 7622 val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
b3969fd7 7623
93ff4858 7624 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
ed5a3f66 7625 vsi->vsi_num, txqueue, tx_ring->next_to_clean,
807bc98d 7626 head, tx_ring->next_to_use, val);
b3969fd7
SM
7627 }
7628
7629 pf->tx_timeout_last_recovery = jiffies;
93ff4858 7630 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
ed5a3f66 7631 pf->tx_timeout_recovery_level, txqueue);
b3969fd7
SM
7632
7633 switch (pf->tx_timeout_recovery_level) {
7634 case 1:
7e408e07 7635 set_bit(ICE_PFR_REQ, pf->state);
b3969fd7
SM
7636 break;
7637 case 2:
7e408e07 7638 set_bit(ICE_CORER_REQ, pf->state);
b3969fd7
SM
7639 break;
7640 case 3:
7e408e07 7641 set_bit(ICE_GLOBR_REQ, pf->state);
b3969fd7
SM
7642 break;
7643 default:
7644 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
7e408e07 7645 set_bit(ICE_DOWN, pf->state);
e97fb1ae 7646 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
7e408e07 7647 set_bit(ICE_SERVICE_DIS, pf->state);
b3969fd7
SM
7648 break;
7649 }
7650
7651 ice_service_task_schedule(pf);
7652 pf->tx_timeout_recovery_level++;
7653}
7654
0d08a441
KP
7655/**
7656 * ice_setup_tc_cls_flower - flower classifier offloads
7657 * @np: net device to configure
7658 * @filter_dev: device on which filter is added
7659 * @cls_flower: offload data
7660 */
7661static int
7662ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
7663 struct net_device *filter_dev,
7664 struct flow_cls_offload *cls_flower)
7665{
7666 struct ice_vsi *vsi = np->vsi;
7667
7668 if (cls_flower->common.chain_index)
7669 return -EOPNOTSUPP;
7670
7671 switch (cls_flower->command) {
7672 case FLOW_CLS_REPLACE:
7673 return ice_add_cls_flower(filter_dev, vsi, cls_flower);
7674 case FLOW_CLS_DESTROY:
7675 return ice_del_cls_flower(vsi, cls_flower);
7676 default:
7677 return -EINVAL;
7678 }
7679}
7680
7681/**
7682 * ice_setup_tc_block_cb - callback handler registered for TC block
7683 * @type: TC SETUP type
7684 * @type_data: TC flower offload data that contains user input
7685 * @cb_priv: netdev private data
7686 */
7687static int
7688ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
7689{
7690 struct ice_netdev_priv *np = cb_priv;
7691
7692 switch (type) {
7693 case TC_SETUP_CLSFLOWER:
7694 return ice_setup_tc_cls_flower(np, np->vsi->netdev,
7695 type_data);
7696 default:
7697 return -EOPNOTSUPP;
7698 }
7699}
7700
fbc7b27a
KP
7701/**
7702 * ice_validate_mqprio_qopt - Validate TCF input parameters
7703 * @vsi: Pointer to VSI
7704 * @mqprio_qopt: input parameters for mqprio queue configuration
7705 *
7706 * This function validates MQPRIO params, such as qcount (power of 2 wherever
7707 * needed), and makes sure the user doesn't specify qcount or a BW rate limit
7708 * for TCs beyond "num_tc"
7709 */
7710static int
7711ice_validate_mqprio_qopt(struct ice_vsi *vsi,
7712 struct tc_mqprio_qopt_offload *mqprio_qopt)
7713{
7714 u64 sum_max_rate = 0, sum_min_rate = 0;
7715 int non_power_of_2_qcount = 0;
7716 struct ice_pf *pf = vsi->back;
7717 int max_rss_q_cnt = 0;
7718 struct device *dev;
7719 int i, speed;
7720 u8 num_tc;
7721
7722 if (vsi->type != ICE_VSI_PF)
7723 return -EINVAL;
7724
7725 if (mqprio_qopt->qopt.offset[0] != 0 ||
7726 mqprio_qopt->qopt.num_tc < 1 ||
7727 mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
7728 return -EINVAL;
7729
7730 dev = ice_pf_to_dev(pf);
7731 vsi->ch_rss_size = 0;
7732 num_tc = mqprio_qopt->qopt.num_tc;
7733
7734 for (i = 0; num_tc; i++) {
7735 int qcount = mqprio_qopt->qopt.count[i];
7736 u64 max_rate, min_rate, rem;
7737
7738 if (!qcount)
7739 return -EINVAL;
7740
7741 if (is_power_of_2(qcount)) {
7742 if (non_power_of_2_qcount &&
7743 qcount > non_power_of_2_qcount) {
7744 dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
7745 qcount, non_power_of_2_qcount);
7746 return -EINVAL;
7747 }
7748 if (qcount > max_rss_q_cnt)
7749 max_rss_q_cnt = qcount;
7750 } else {
7751 if (non_power_of_2_qcount &&
7752 qcount != non_power_of_2_qcount) {
7753 dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
7754 qcount, non_power_of_2_qcount);
7755 return -EINVAL;
7756 }
7757 if (qcount < max_rss_q_cnt) {
7758 dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
7759 qcount, max_rss_q_cnt);
7760 return -EINVAL;
7761 }
7762 max_rss_q_cnt = qcount;
7763 non_power_of_2_qcount = qcount;
7764 }
7765
7766 /* The TC command takes input in Kbps/Mbps/Gbps (or Kbit/Mbit/Gbit) but
7767 * converts the bandwidth rate limit into Bytes/s when
7768 * passing it down to the driver. So convert input bandwidth
7769 * from Bytes/s to Kbps
7770 */
7771 max_rate = mqprio_qopt->max_rate[i];
7772 max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
7773 sum_max_rate += max_rate;
7774
7775 /* min_rate is minimum guaranteed rate and it can't be zero */
7776 min_rate = mqprio_qopt->min_rate[i];
7777 min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
7778 sum_min_rate += min_rate;
7779
7780 if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
7781 dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
7782 min_rate, ICE_MIN_BW_LIMIT);
7783 return -EINVAL;
7784 }
7785
7786 iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
7787 if (rem) {
7788 dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
7789 i, ICE_MIN_BW_LIMIT);
7790 return -EINVAL;
7791 }
7792
7793 iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
7794 if (rem) {
7795 dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
7796 i, ICE_MIN_BW_LIMIT);
7797 return -EINVAL;
7798 }
7799
7800 /* min_rate can't be more than max_rate, except when max_rate
7801 * is zero (implies max_rate sought is max line rate). In such
7802 * a case min_rate can be more than max.
7803 */
7804 if (max_rate && min_rate > max_rate) {
7805 dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
7806 min_rate, max_rate);
7807 return -EINVAL;
7808 }
7809
7810 if (i >= mqprio_qopt->qopt.num_tc - 1)
7811 break;
7812 if (mqprio_qopt->qopt.offset[i + 1] !=
7813 (mqprio_qopt->qopt.offset[i] + qcount))
7814 return -EINVAL;
7815 }
7816 if (vsi->num_rxq <
7817 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7818 return -EINVAL;
7819 if (vsi->num_txq <
7820 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7821 return -EINVAL;
7822
7823 speed = ice_get_link_speed_kbps(vsi);
7824 if (sum_max_rate && sum_max_rate > (u64)speed) {
7825 dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n",
7826 sum_max_rate, speed);
7827 return -EINVAL;
7828 }
7829 if (sum_min_rate && sum_min_rate > (u64)speed) {
7830 dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
7831 sum_min_rate, speed);
7832 return -EINVAL;
7833 }
7834
7835 /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
7836 vsi->ch_rss_size = max_rss_q_cnt;
7837
7838 return 0;
7839}
7840
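/* Editorial note, not part of the driver: a channel-mode mqprio request that
 * passes the validation above keeps per-TC qcounts power-of-2 (or a single
 * shared non-power-of-2 value), contiguous offsets starting at 0, and rates
 * that are multiples of ICE_MIN_BW_LIMIT. For example, with
 *
 *   num_tc = 2, count = {4, 8}, offset = {0, 4}, max_rate = {125000000, 0}
 *
 * TC0 spans queues 0..3, TC1 spans queues 4..11, and TC0's 125,000,000 Bytes/s
 * cap converts to 1,000,000 Kbps (divide by ICE_BW_KBPS_DIVISOR, assumed to be
 * 125), i.e. roughly a 1 Gbit/s limit. A zero max_rate means "up to line
 * rate".
 */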
40319796
KP
7841/**
7842 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
7843 * @pf: ptr to PF device
7844 * @vsi: ptr to VSI
7845 */
7846static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
7847{
7848 struct device *dev = ice_pf_to_dev(pf);
7849 bool added = false;
7850 struct ice_hw *hw;
7851 int flow;
7852
7853 if (!(vsi->num_gfltr || vsi->num_bfltr))
7854 return -EINVAL;
7855
7856 hw = &pf->hw;
7857 for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
7858 struct ice_fd_hw_prof *prof;
7859 int tun, status;
7860 u64 entry_h;
7861
7862 if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
7863 hw->fdir_prof[flow]->cnt))
7864 continue;
7865
7866 for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
7867 enum ice_flow_priority prio;
7868 u64 prof_id;
7869
7870 /* add this VSI to FDir profile for this flow */
7871 prio = ICE_FLOW_PRIO_NORMAL;
7872 prof = hw->fdir_prof[flow];
7873 prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
7874 status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
7875 prof->vsi_h[0], vsi->idx,
7876 prio, prof->fdir_seg[tun],
7877 &entry_h);
7878 if (status) {
7879 dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
7880 vsi->idx, flow);
7881 continue;
7882 }
7883
7884 prof->entry_h[prof->cnt][tun] = entry_h;
7885 }
7886
7887 /* store VSI for filter replay and delete */
7888 prof->vsi_h[prof->cnt] = vsi->idx;
7889 prof->cnt++;
7890
7891 added = true;
7892 dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
7893 flow);
7894 }
7895
7896 if (!added)
7897 dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
7898
7899 return 0;
7900}
7901
fbc7b27a
KP
7902/**
7903 * ice_add_channel - add a channel by adding VSI
7904 * @pf: ptr to PF device
7905 * @sw_id: underlying HW switching element ID
7906 * @ch: ptr to channel structure
7907 *
7908 * Add a channel (VSI) using add_vsi and queue_map
7909 */
7910static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
7911{
7912 struct device *dev = ice_pf_to_dev(pf);
7913 struct ice_vsi *vsi;
7914
7915 if (ch->type != ICE_VSI_CHNL) {
7916 dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
7917 return -EINVAL;
7918 }
7919
7920 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
7921 if (!vsi || vsi->type != ICE_VSI_CHNL) {
7922 dev_err(dev, "create chnl VSI failure\n");
7923 return -EINVAL;
7924 }
7925
40319796
KP
7926 ice_add_vsi_to_fdir(pf, vsi);
7927
fbc7b27a
KP
7928 ch->sw_id = sw_id;
7929 ch->vsi_num = vsi->vsi_num;
7930 ch->info.mapping_flags = vsi->info.mapping_flags;
7931 ch->ch_vsi = vsi;
7932 /* set the back pointer of channel for newly created VSI */
7933 vsi->ch = ch;
7934
7935 memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
7936 sizeof(vsi->info.q_mapping));
7937 memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
7938 sizeof(vsi->info.tc_mapping));
7939
7940 return 0;
7941}
7942
7943/**
7944 * ice_chnl_cfg_res
7945 * @vsi: the VSI being setup
7946 * @ch: ptr to channel structure
7947 *
7948 * Configure channel specific resources such as rings, vector.
7949 */
7950static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
7951{
7952 int i;
7953
7954 for (i = 0; i < ch->num_txq; i++) {
7955 struct ice_q_vector *tx_q_vector, *rx_q_vector;
7956 struct ice_ring_container *rc;
7957 struct ice_tx_ring *tx_ring;
7958 struct ice_rx_ring *rx_ring;
7959
7960 tx_ring = vsi->tx_rings[ch->base_q + i];
7961 rx_ring = vsi->rx_rings[ch->base_q + i];
7962 if (!tx_ring || !rx_ring)
7963 continue;
7964
7965 /* setup ring being channel enabled */
7966 tx_ring->ch = ch;
7967 rx_ring->ch = ch;
7968
7969 /* following code block sets up vector specific attributes */
7970 tx_q_vector = tx_ring->q_vector;
7971 rx_q_vector = rx_ring->q_vector;
7972 if (!tx_q_vector && !rx_q_vector)
7973 continue;
7974
7975 if (tx_q_vector) {
7976 tx_q_vector->ch = ch;
7977 /* setup Tx and Rx ITR setting if DIM is off */
7978 rc = &tx_q_vector->tx;
7979 if (!ITR_IS_DYNAMIC(rc))
7980 ice_write_itr(rc, rc->itr_setting);
7981 }
7982 if (rx_q_vector) {
7983 rx_q_vector->ch = ch;
7984 /* setup Tx and Rx ITR setting if DIM is off */
7985 rc = &rx_q_vector->rx;
7986 if (!ITR_IS_DYNAMIC(rc))
7987 ice_write_itr(rc, rc->itr_setting);
7988 }
7989 }
7990
7991 /* It is safe to assume that, if the channel has a non-zero num_txq or
7992 * num_rxq, the GLINT_ITR register has been written to perform an
7993 * in-context update, hence perform a flush
7994 */
7995 if (ch->num_txq || ch->num_rxq)
7996 ice_flush(&vsi->back->hw);
7997}
7998
7999/**
8000 * ice_cfg_chnl_all_res - configure channel resources
8001 * @vsi: ptr to the main VSI
8002 * @ch: ptr to channel structure
8003 *
8004 * This function configures channel specific resources such as flow-director
8005 * counter index, and other resources such as queues, vectors, ITR settings
8006 */
8007static void
8008ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8009{
8010 /* configure channel (aka ADQ) resources such as queues, vectors,
8011 * ITR settings for channel specific vectors and anything else
8012 */
8013 ice_chnl_cfg_res(vsi, ch);
8014}
8015
8016/**
8017 * ice_setup_hw_channel - setup new channel
8018 * @pf: ptr to PF device
8019 * @vsi: the VSI being setup
8020 * @ch: ptr to channel structure
8021 * @sw_id: underlying HW switching element ID
8022 * @type: type of channel to be created (VMDq2/VF)
8023 *
8024 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8025 * and configures Tx rings accordingly
8026 */
8027static int
8028ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8029 struct ice_channel *ch, u16 sw_id, u8 type)
8030{
8031 struct device *dev = ice_pf_to_dev(pf);
8032 int ret;
8033
8034 ch->base_q = vsi->next_base_q;
8035 ch->type = type;
8036
8037 ret = ice_add_channel(pf, sw_id, ch);
8038 if (ret) {
8039 dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8040 return ret;
8041 }
8042
8043 /* configure/setup ADQ specific resources */
8044 ice_cfg_chnl_all_res(vsi, ch);
8045
8046 /* make sure to update the next_base_q so that subsequent channel's
8047 * (aka ADQ) VSI queue map is correct
8048 */
8049 vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
8050 dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8051 ch->num_rxq);
8052
8053 return 0;
8054}
8055
8056/**
8057 * ice_setup_channel - setup new channel using uplink element
8058 * @pf: ptr to PF device
8059 * @vsi: the VSI being setup
8060 * @ch: ptr to channel structure
8061 *
8062 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8063 * and uplink switching element
8064 */
8065static bool
8066ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8067 struct ice_channel *ch)
8068{
8069 struct device *dev = ice_pf_to_dev(pf);
8070 u16 sw_id;
8071 int ret;
8072
8073 if (vsi->type != ICE_VSI_PF) {
8074 dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8075 return false;
8076 }
8077
8078 sw_id = pf->first_sw->sw_id;
8079
8080 /* create channel (VSI) */
8081 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8082 if (ret) {
8083 dev_err(dev, "failed to setup hw_channel\n");
8084 return false;
8085 }
8086 dev_dbg(dev, "successfully created channel()\n");
8087
8088 return ch->ch_vsi ? true : false;
8089}
8090
8091/**
8092 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8093 * @vsi: VSI to be configured
8094 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
8095 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
8096 */
8097static int
8098ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8099{
8100 int err;
8101
8102 err = ice_set_min_bw_limit(vsi, min_tx_rate);
8103 if (err)
8104 return err;
8105
8106 return ice_set_max_bw_limit(vsi, max_tx_rate);
8107}
8108
8109/**
8110 * ice_create_q_channel - function to create channel
8111 * @vsi: VSI to be configured
8112 * @ch: ptr to channel (it contains channel specific params)
8113 *
8114 * This function creates a channel (VSI) using the num_queues specified by the
8115 * user and reconfigures RSS if needed.
8116 */
8117static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8118{
8119 struct ice_pf *pf = vsi->back;
8120 struct device *dev;
8121
8122 if (!ch)
8123 return -EINVAL;
8124
8125 dev = ice_pf_to_dev(pf);
8126 if (!ch->num_txq || !ch->num_rxq) {
8127 dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8128 return -EINVAL;
8129 }
8130
8131 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8132 dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8133 vsi->cnt_q_avail, ch->num_txq);
8134 return -EINVAL;
8135 }
8136
8137 if (!ice_setup_channel(pf, vsi, ch)) {
8138 dev_info(dev, "Failed to setup channel\n");
8139 return -EINVAL;
8140 }
8141 /* configure BW rate limit */
8142 if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8143 int ret;
8144
8145 ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8146 ch->min_tx_rate);
8147 if (ret)
8148 dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8149 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8150 else
8151 dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8152 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8153 }
8154
8155 vsi->cnt_q_avail -= ch->num_txq;
8156
8157 return 0;
8158}
8159
9fea7498
KP
8160/**
8161 * ice_rem_all_chnl_fltrs - removes all channel filters
8162 * @pf: ptr to PF, TC-flower based filter are tracked at PF level
8163 *
8164 * Remove all advanced switch filters only if they are channel specific
8165 * tc-flower based filters
8166 */
8167static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8168{
8169 struct ice_tc_flower_fltr *fltr;
8170 struct hlist_node *node;
8171
8172 /* to remove all channel filters, iterate an ordered list of filters */
8173 hlist_for_each_entry_safe(fltr, node,
8174 &pf->tc_flower_fltr_list,
8175 tc_flower_node) {
8176 struct ice_rule_query_data rule;
8177 int status;
8178
8179 /* for now process only channel specific filters */
8180 if (!ice_is_chnl_fltr(fltr))
8181 continue;
8182
8183 rule.rid = fltr->rid;
8184 rule.rule_id = fltr->rule_id;
8185 rule.vsi_handle = fltr->dest_id;
8186 status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8187 if (status) {
8188 if (status == -ENOENT)
8189 dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8190 rule.rule_id);
8191 else
8192 dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8193 status);
8194 } else if (fltr->dest_vsi) {
8195 /* update advanced switch filter count */
8196 if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
8197 u32 flags = fltr->flags;
8198
8199 fltr->dest_vsi->num_chnl_fltr--;
8200 if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
8201 ICE_TC_FLWR_FIELD_ENC_DST_MAC))
8202 pf->num_dmac_chnl_fltrs--;
8203 }
8204 }
8205
8206 hlist_del(&fltr->tc_flower_node);
8207 kfree(fltr);
8208 }
8209}
8210
fbc7b27a
KP
8211/**
8212 * ice_remove_q_channels - Remove queue channels for the TCs
8213 * @vsi: VSI to be configured
8214 * @rem_fltr: delete advanced switch filter or not
8215 *
8216 * Remove queue channels for the TCs
8217 */
9fea7498 8218static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
fbc7b27a
KP
8219{
8220 struct ice_channel *ch, *ch_tmp;
9fea7498 8221 struct ice_pf *pf = vsi->back;
fbc7b27a
KP
8222 int i;
8223
9fea7498
KP
8224 /* remove all tc-flower based filters if they are channel filters only */
8225 if (rem_fltr)
8226 ice_rem_all_chnl_fltrs(pf);
8227
40319796
KP
8228 /* remove ntuple filters since queue configuration is being changed */
8229 if (vsi->netdev->features & NETIF_F_NTUPLE) {
8230 struct ice_hw *hw = &pf->hw;
8231
8232 mutex_lock(&hw->fdir_fltr_lock);
8233 ice_fdir_del_all_fltrs(vsi);
8234 mutex_unlock(&hw->fdir_fltr_lock);
8235 }
8236
fbc7b27a
KP
8237 /* perform cleanup for channels if they exist */
8238 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8239 struct ice_vsi *ch_vsi;
8240
8241 list_del(&ch->list);
8242 ch_vsi = ch->ch_vsi;
8243 if (!ch_vsi) {
8244 kfree(ch);
8245 continue;
8246 }
8247
8248 /* Reset queue contexts */
8249 for (i = 0; i < ch->num_rxq; i++) {
8250 struct ice_tx_ring *tx_ring;
8251 struct ice_rx_ring *rx_ring;
8252
8253 tx_ring = vsi->tx_rings[ch->base_q + i];
8254 rx_ring = vsi->rx_rings[ch->base_q + i];
8255 if (tx_ring) {
8256 tx_ring->ch = NULL;
8257 if (tx_ring->q_vector)
8258 tx_ring->q_vector->ch = NULL;
8259 }
8260 if (rx_ring) {
8261 rx_ring->ch = NULL;
8262 if (rx_ring->q_vector)
8263 rx_ring->q_vector->ch = NULL;
8264 }
8265 }
8266
40319796
KP
8267 /* Release FD resources for the channel VSI */
8268 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8269
fbc7b27a
KP
8270 /* clear the VSI from scheduler tree */
8271 ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8272
8273 /* Delete VSI from FW */
8274 ice_vsi_delete(ch->ch_vsi);
8275
8276 /* Delete VSI from PF and HW VSI arrays */
8277 ice_vsi_clear(ch->ch_vsi);
8278
8279 /* free the channel */
8280 kfree(ch);
8281 }
8282
8283 /* clear the channel VSI map which is stored in main VSI */
8284 ice_for_each_chnl_tc(i)
8285 vsi->tc_map_vsi[i] = NULL;
8286
8287 /* reset main VSI's all TC information */
8288 vsi->all_enatc = 0;
8289 vsi->all_numtc = 0;
8290}
8291
8292/**
8293 * ice_rebuild_channels - rebuild channel
8294 * @pf: ptr to PF
8295 *
8296 * Recreate channel VSIs and replay filters
8297 */
8298static int ice_rebuild_channels(struct ice_pf *pf)
8299{
8300 struct device *dev = ice_pf_to_dev(pf);
8301 struct ice_vsi *main_vsi;
8302 bool rem_adv_fltr = true;
8303 struct ice_channel *ch;
8304 struct ice_vsi *vsi;
8305 int tc_idx = 1;
8306 int i, err;
8307
8308 main_vsi = ice_get_main_vsi(pf);
8309 if (!main_vsi)
8310 return 0;
8311
8312 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8313 main_vsi->old_numtc == 1)
8314 return 0; /* nothing to be done */
8315
8316 /* reconfigure main VSI based on old value of TC and cached values
8317 * for MQPRIO opts
8318 */
8319 err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
8320 if (err) {
8321 dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
8322 main_vsi->old_ena_tc, main_vsi->vsi_num);
8323 return err;
8324 }
8325
8326 /* rebuild ADQ VSIs */
8327 ice_for_each_vsi(pf, i) {
8328 enum ice_vsi_type type;
8329
8330 vsi = pf->vsi[i];
8331 if (!vsi || vsi->type != ICE_VSI_CHNL)
8332 continue;
8333
8334 type = vsi->type;
8335
8336 /* rebuild ADQ VSI */
8337 err = ice_vsi_rebuild(vsi, true);
8338 if (err) {
8339 dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
8340 ice_vsi_type_str(type), vsi->idx, err);
8341 goto cleanup;
8342 }
8343
8344 /* Re-map HW VSI number, using the VSI handle that is
8345 * validated by the ice_replay_vsi() call below
8346 */
8347 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
8348
8349 /* replay filters for the VSI */
8350 err = ice_replay_vsi(&pf->hw, vsi->idx);
8351 if (err) {
8352 dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
8353 ice_vsi_type_str(type), err, vsi->idx);
8354 rem_adv_fltr = false;
8355 goto cleanup;
8356 }
8357 dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
8358 ice_vsi_type_str(type), vsi->idx);
8359
8360 /* store ADQ VSI at correct TC index in main VSI's
8361 * map of TC to VSI
8362 */
8363 main_vsi->tc_map_vsi[tc_idx++] = vsi;
8364 }
8365
8366 /* ADQ VSI(s) has been rebuilt successfully, so setup
8367 * channel for main VSI's Tx and Rx rings
8368 */
8369 list_for_each_entry(ch, &main_vsi->ch_list, list) {
8370 struct ice_vsi *ch_vsi;
8371
8372 ch_vsi = ch->ch_vsi;
8373 if (!ch_vsi)
8374 continue;
8375
8376 /* reconfig channel resources */
8377 ice_cfg_chnl_all_res(main_vsi, ch);
8378
8379 /* replay BW rate limit if it is non-zero */
8380 if (!ch->max_tx_rate && !ch->min_tx_rate)
8381 continue;
8382
8383 err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
8384 ch->min_tx_rate);
8385 if (err)
8386 dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8387 err, ch->max_tx_rate, ch->min_tx_rate,
8388 ch_vsi->vsi_num);
8389 else
8390 dev_dbg(dev, "successfully rebuilt BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8391 ch->max_tx_rate, ch->min_tx_rate,
8392 ch_vsi->vsi_num);
8393 }
8394
8395 /* reconfig RSS for main VSI */
8396 if (main_vsi->ch_rss_size)
8397 ice_vsi_cfg_rss_lut_key(main_vsi);
8398
8399 return 0;
8400
8401cleanup:
8402 ice_remove_q_channels(main_vsi, rem_adv_fltr);
8403 return err;
8404}
8405
8406/**
8407 * ice_create_q_channels - Add queue channel for the given TCs
8408 * @vsi: VSI to be configured
8409 *
8410 * Configures queue channel mapping to the given TCs
8411 */
8412static int ice_create_q_channels(struct ice_vsi *vsi)
8413{
8414 struct ice_pf *pf = vsi->back;
8415 struct ice_channel *ch;
8416 int ret = 0, i;
8417
8418 ice_for_each_chnl_tc(i) {
8419 if (!(vsi->all_enatc & BIT(i)))
8420 continue;
8421
8422 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
8423 if (!ch) {
8424 ret = -ENOMEM;
8425 goto err_free;
8426 }
8427 INIT_LIST_HEAD(&ch->list);
8428 ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
8429 ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
8430 ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
8431 ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
8432 ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
8433
8434 /* convert to Kbits/s */
8435 if (ch->max_tx_rate)
8436 ch->max_tx_rate = div_u64(ch->max_tx_rate,
8437 ICE_BW_KBPS_DIVISOR);
8438 if (ch->min_tx_rate)
8439 ch->min_tx_rate = div_u64(ch->min_tx_rate,
8440 ICE_BW_KBPS_DIVISOR);
8441
8442 ret = ice_create_q_channel(vsi, ch);
8443 if (ret) {
8444 dev_err(ice_pf_to_dev(pf),
8445 "failed creating channel TC:%d\n", i);
8446 kfree(ch);
8447 goto err_free;
8448 }
8449 list_add_tail(&ch->list, &vsi->ch_list);
8450 vsi->tc_map_vsi[i] = ch->ch_vsi;
8451 dev_dbg(ice_pf_to_dev(pf),
8452 "successfully created channel: VSI %pK\n", ch->ch_vsi);
8453 }
8454 return 0;
8455
8456err_free:
8457 ice_remove_q_channels(vsi, false);
8458
8459 return ret;
8460}
8461
8462/**
8463 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
8464 * @netdev: net device to configure
8465 * @type_data: TC offload data
8466 */
8467static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
8468{
8469 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8470 struct ice_netdev_priv *np = netdev_priv(netdev);
8471 struct ice_vsi *vsi = np->vsi;
8472 struct ice_pf *pf = vsi->back;
8473 u16 mode, ena_tc_qdisc = 0;
8474 int cur_txq, cur_rxq;
8475 u8 hw = 0, num_tcf;
8476 struct device *dev;
8477 int ret, i;
8478
8479 dev = ice_pf_to_dev(pf);
8480 num_tcf = mqprio_qopt->qopt.num_tc;
8481 hw = mqprio_qopt->qopt.hw;
8482 mode = mqprio_qopt->mode;
8483 if (!hw) {
8484 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8485 vsi->ch_rss_size = 0;
8486 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8487 goto config_tcf;
8488 }
8489
8490 /* Generate queue region map for number of TCF requested */
8491 for (i = 0; i < num_tcf; i++)
8492 ena_tc_qdisc |= BIT(i);
8493
8494 switch (mode) {
8495 case TC_MQPRIO_MODE_CHANNEL:
8496
8497 ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
8498 if (ret) {
8499 netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
8500 ret);
8501 return ret;
8502 }
8503 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8504 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8505 /* don't assume the state of hw_tc_offload during driver load;
8506 * set the flag for TC flower filters if hw_tc_offload is
8507 * already ON
8508 */
8509 if (vsi->netdev->features & NETIF_F_HW_TC)
8510 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
8511 break;
8512 default:
8513 return -EINVAL;
8514 }
8515
8516config_tcf:
8517
8518 /* Requesting same TCF configuration as already enabled */
8519 if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
8520 mode != TC_MQPRIO_MODE_CHANNEL)
8521 return 0;
8522
8523 /* Pause VSI queues */
8524 ice_dis_vsi(vsi, true);
8525
8526 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
8527 ice_remove_q_channels(vsi, true);
8528
8529 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8530 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
8531 num_online_cpus());
8532 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
8533 num_online_cpus());
8534 } else {
8535 /* logic to rebuild VSI, same as ethtool -L */
8536 u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
8537
8538 for (i = 0; i < num_tcf; i++) {
8539 if (!(ena_tc_qdisc & BIT(i)))
8540 continue;
8541
8542 offset = vsi->mqprio_qopt.qopt.offset[i];
8543 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
8544 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
8545 }
8546 vsi->req_txq = offset + qcount_tx;
8547 vsi->req_rxq = offset + qcount_rx;
8548
8549 /* store away the original rss_size info, so that it can be reused
8550 * by ice_vsi_rebuild during the tc-qdisc delete stage to
8551 * determine what the rss_size for the main VSI should be
8552 */
8553 vsi->orig_rss_size = vsi->rss_size;
8554 }
8555
8556 /* save current values of Tx and Rx queues before calling VSI rebuild
8557 * for fallback option
8558 */
8559 cur_txq = vsi->num_txq;
8560 cur_rxq = vsi->num_rxq;
8561
8562 /* proceed with rebuilding the main VSI using the correct number of queues */
8563 ret = ice_vsi_rebuild(vsi, false);
8564 if (ret) {
8565 /* fallback to current number of queues */
8566 dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
8567 vsi->req_txq = cur_txq;
8568 vsi->req_rxq = cur_rxq;
8569 clear_bit(ICE_RESET_FAILED, pf->state);
8570 if (ice_vsi_rebuild(vsi, false)) {
8571 dev_err(dev, "Rebuild of main VSI failed again\n");
8572 return ret;
8573 }
8574 }
8575
8576 vsi->all_numtc = num_tcf;
8577 vsi->all_enatc = ena_tc_qdisc;
8578 ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
8579 if (ret) {
8580 netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
8581 vsi->vsi_num);
8582 goto exit;
8583 }
8584
8585 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8586 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
8587 u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
8588
8589 /* set TC0 rate limit if specified */
8590 if (max_tx_rate || min_tx_rate) {
8591 /* convert to Kbits/s */
8592 if (max_tx_rate)
8593 max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
8594 if (min_tx_rate)
8595 min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
8596
8597 ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
8598 if (!ret) {
8599 dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
8600 max_tx_rate, min_tx_rate, vsi->vsi_num);
8601 } else {
8602 dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
8603 max_tx_rate, min_tx_rate, vsi->vsi_num);
8604 goto exit;
8605 }
8606 }
8607 ret = ice_create_q_channels(vsi);
8608 if (ret) {
8609 netdev_err(netdev, "failed configuring queue channels\n");
8610 goto exit;
8611 } else {
8612 netdev_dbg(netdev, "successfully configured channels\n");
8613 }
8614 }
8615
8616 if (vsi->ch_rss_size)
8617 ice_vsi_cfg_rss_lut_key(vsi);
8618
8619exit:
8620 /* if error, reset the all_numtc and all_enatc */
8621 if (ret) {
8622 vsi->all_numtc = 0;
8623 vsi->all_enatc = 0;
8624 }
8625 /* resume VSI */
8626 ice_ena_vsi(vsi, true);
8627
8628 return ret;
8629}
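/* For reference, a channel-mode mqprio request that exercises the handler
 * above could look like the following (illustrative only; the interface
 * name, queue layout and rates are assumptions, not taken from this file):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *      queues 4@0 4@4 hw 1 mode channel \
 *      shaper bw_rlimit max_rate 1Gbit 500Mbit
 *
 * With hw set and TC_MQPRIO_MODE_CHANNEL, the handler validates the qopt,
 * rebuilds the main VSI with the requested queue counts, applies the TC0
 * rate limit if one was given, and creates one channel VSI per additional
 * TC via ice_create_q_channels().
 */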
8630
8631static LIST_HEAD(ice_block_cb_list);
8632
8633static int
8634ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8635 void *type_data)
8636{
8637 struct ice_netdev_priv *np = netdev_priv(netdev);
8638 struct ice_pf *pf = np->vsi->back;
8639 int err;
8640
8641 switch (type) {
8642 case TC_SETUP_BLOCK:
8643 return flow_block_cb_setup_simple(type_data,
8644 &ice_block_cb_list,
8645 ice_setup_tc_block_cb,
8646 np, np, true);
8647 case TC_SETUP_QDISC_MQPRIO:
8648 /* setup traffic classifier for receive side */
8649 mutex_lock(&pf->tc_mutex);
8650 err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
8651 mutex_unlock(&pf->tc_mutex);
8652 return err;
8653 default:
8654 return -EOPNOTSUPP;
8655 }
8656 return -EOPNOTSUPP;
8657}
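/* For reference, the TC_SETUP_BLOCK path above is what a flower filter on
 * the PF netdev exercises; an illustrative command sequence (interface name
 * and match/action values are assumptions):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress protocol ip flower \
 *      dst_ip 192.168.1.10 skip_sw hw_tc 1
 *
 * flow_block_cb_setup_simple() binds ice_setup_tc_block_cb() to the block,
 * so matching filters reach the driver as flower offload requests.
 */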
8658
8659static struct ice_indr_block_priv *
8660ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
8661 struct net_device *netdev)
8662{
8663 struct ice_indr_block_priv *cb_priv;
8664
8665 list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
8666 if (!cb_priv->netdev)
8667 return NULL;
8668 if (cb_priv->netdev == netdev)
8669 return cb_priv;
8670 }
8671 return NULL;
8672}
8673
8674static int
8675ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
8676 void *indr_priv)
8677{
8678 struct ice_indr_block_priv *priv = indr_priv;
8679 struct ice_netdev_priv *np = priv->np;
8680
8681 switch (type) {
8682 case TC_SETUP_CLSFLOWER:
8683 return ice_setup_tc_cls_flower(np, priv->netdev,
8684 (struct flow_cls_offload *)
8685 type_data);
8686 default:
8687 return -EOPNOTSUPP;
8688 }
8689}
8690
8691static int
8692ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
8693 struct ice_netdev_priv *np,
8694 struct flow_block_offload *f, void *data,
8695 void (*cleanup)(struct flow_block_cb *block_cb))
8696{
8697 struct ice_indr_block_priv *indr_priv;
8698 struct flow_block_cb *block_cb;
8699
8700 if (!ice_is_tunnel_supported(netdev) &&
8701 !(is_vlan_dev(netdev) &&
8702 vlan_dev_real_dev(netdev) == np->vsi->netdev))
8703 return -EOPNOTSUPP;
8704
8705 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
8706 return -EOPNOTSUPP;
8707
8708 switch (f->command) {
8709 case FLOW_BLOCK_BIND:
8710 indr_priv = ice_indr_block_priv_lookup(np, netdev);
8711 if (indr_priv)
8712 return -EEXIST;
8713
8714 indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
8715 if (!indr_priv)
8716 return -ENOMEM;
8717
8718 indr_priv->netdev = netdev;
8719 indr_priv->np = np;
8720 list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
8721
8722 block_cb =
8723 flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
8724 indr_priv, indr_priv,
8725 ice_rep_indr_tc_block_unbind,
8726 f, netdev, sch, data, np,
8727 cleanup);
8728
8729 if (IS_ERR(block_cb)) {
8730 list_del(&indr_priv->list);
8731 kfree(indr_priv);
8732 return PTR_ERR(block_cb);
8733 }
8734 flow_block_cb_add(block_cb, f);
8735 list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
8736 break;
8737 case FLOW_BLOCK_UNBIND:
8738 indr_priv = ice_indr_block_priv_lookup(np, netdev);
8739 if (!indr_priv)
8740 return -ENOENT;
8741
8742 block_cb = flow_block_cb_lookup(f->block,
8743 ice_indr_setup_block_cb,
8744 indr_priv);
8745 if (!block_cb)
8746 return -ENOENT;
8747
8748 flow_indr_block_cb_remove(block_cb, f);
8749
8750 list_del(&block_cb->driver_list);
8751 break;
8752 default:
8753 return -EOPNOTSUPP;
8754 }
8755 return 0;
8756}
8757
8758static int
8759ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
8760 void *cb_priv, enum tc_setup_type type, void *type_data,
8761 void *data,
8762 void (*cleanup)(struct flow_block_cb *block_cb))
8763{
8764 switch (type) {
8765 case TC_SETUP_BLOCK:
8766 return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
8767 data, cleanup);
8768
8769 default:
8770 return -EOPNOTSUPP;
8771 }
8772}
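/* A minimal sketch of how a callback such as ice_indr_setup_tc_cb() gets
 * wired into the flow offload core (the actual registration helper lives
 * elsewhere in this driver; the helper name and placement here are assumed
 * for illustration):
 *
 *   static int ice_tc_indir_block_register(struct ice_vsi *vsi)
 *   {
 *       struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
 *
 *       INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
 *       return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
 *   }
 *
 * flow_indr_dev_register()/flow_indr_dev_unregister() are the core APIs that
 * deliver TC_SETUP_BLOCK requests for foreign devices (tunnels, VLAN uppers
 * of the PF netdev) to this callback, which then binds or unbinds a
 * flow_block_cb through ice_indr_setup_tc_block() above.
 */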
8773
8774/**
8775 * ice_open - Called when a network interface becomes active
8776 * @netdev: network interface device structure
8777 *
8778 * The open entry point is called when a network interface is made
8779 * active by the system (IFF_UP). At this point all resources needed
8780 * for transmit and receive operations are allocated, the interrupt
8781 * handler is registered with the OS, the netdev watchdog is enabled,
8782 * and the stack is notified that the interface is ready.
8783 *
8784 * Returns 0 on success, negative value on failure
8785 */
8786int ice_open(struct net_device *netdev)
8787{
8788 struct ice_netdev_priv *np = netdev_priv(netdev);
8789 struct ice_pf *pf = np->vsi->back;
8790
8791 if (ice_is_reset_in_progress(pf->state)) {
8792 netdev_err(netdev, "can't open net device while reset is in progress");
8793 return -EBUSY;
8794 }
8795
8796 return ice_open_internal(netdev);
8797}
8798
8799/**
8800 * ice_open_internal - Called when a network interface becomes active
8801 * @netdev: network interface device structure
8802 *
8803 * Internal ice_open implementation. Should not be used directly except by ice_open and the reset
8804 * handling routine.
8805 *
8806 * Returns 0 on success, negative value on failure
8807 */
8808int ice_open_internal(struct net_device *netdev)
8809{
8810 struct ice_netdev_priv *np = netdev_priv(netdev);
8811 struct ice_vsi *vsi = np->vsi;
8812 struct ice_pf *pf = vsi->back;
8813 struct ice_port_info *pi;
8814 int err;
8815
8816 if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
8817 netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
8818 return -EIO;
8819 }
8820
8821 netif_carrier_off(netdev);
8822
8823 pi = vsi->port_info;
8824 err = ice_update_link_info(pi);
8825 if (err) {
8826 netdev_err(netdev, "Failed to get link info, error %d\n", err);
8827 return err;
8828 }
8829
8830 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
8831
8832 /* Set PHY if there is media, otherwise, turn off PHY */
8833 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
8834 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
8835 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
8836 err = ice_init_phy_user_cfg(pi);
8837 if (err) {
8838 netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
8839 err);
8840 return err;
8841 }
8842 }
8843
8844 err = ice_configure_phy(vsi);
8845 if (err) {
8846 netdev_err(netdev, "Failed to set physical link up, error %d\n",
8847 err);
8848 return err;
8849 }
8850 } else {
8851 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
8852 ice_set_link(vsi, false);
8853 }
8854
8855 err = ice_vsi_open(vsi);
8856 if (err)
8857 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
8858 vsi->vsi_num, vsi->vsw->sw_id);
8859
8860 /* Update existing tunnels information */
8861 udp_tunnel_get_rx_info(netdev);
8862
8863 return err;
8864}
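/* Note on ordering in ice_open_internal(): link info is refreshed first so
 * the media check is current, the PHY is only (re)configured when media is
 * present, and ice_vsi_open() (queue/interrupt setup and netif start) runs
 * last; udp_tunnel_get_rx_info() then replays tunnel port notifications so
 * tunnel offloads survive an interface down/up cycle.
 */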
8865
8866/**
8867 * ice_stop - Disables a network interface
8868 * @netdev: network interface device structure
8869 *
8870 * The stop entry point is called when an interface is de-activated by the OS,
8871 * and the netdevice enters the DOWN state. The hardware is still under the
8872 * driver's control, but the netdev interface is disabled.
8873 *
8874 * Returns success only - not allowed to fail
8875 */
8876int ice_stop(struct net_device *netdev)
8877{
8878 struct ice_netdev_priv *np = netdev_priv(netdev);
8879 struct ice_vsi *vsi = np->vsi;
8880 struct ice_pf *pf = vsi->back;
8881
8882 if (ice_is_reset_in_progress(pf->state)) {
8883 netdev_err(netdev, "can't stop net device while reset is in progress");
8884 return -EBUSY;
8885 }
8886
8887 ice_vsi_close(vsi);
8888
8889 return 0;
8890}
8891
8892/**
8893 * ice_features_check - Validate encapsulated packet conforms to limits
8894 * @skb: skb buffer
8895 * @netdev: This port's netdev
8896 * @features: Offload features that the stack believes apply
8897 */
8898static netdev_features_t
8899ice_features_check(struct sk_buff *skb,
8900 struct net_device __always_unused *netdev,
8901 netdev_features_t features)
8902{
8903 bool gso = skb_is_gso(skb);
8904 size_t len;
8905
8906 /* No point in doing any of this if neither checksum nor GSO are
8907 * being requested for this frame. We can rule out both by just
8908 * checking for CHECKSUM_PARTIAL
8909 */
8910 if (skb->ip_summed != CHECKSUM_PARTIAL)
8911 return features;
8912
8913 /* We cannot support GSO if the MSS is going to be less than
8914 * 64 bytes. If it is then we need to drop support for GSO.
8915 */
8916 if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
8917 features &= ~NETIF_F_GSO_MASK;
8918
8919 len = skb_network_offset(skb);
8920 if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
8921 goto out_rm_features;
8922
8923 len = skb_network_header_len(skb);
8924 if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
8925 goto out_rm_features;
8926
8927 if (skb->encapsulation) {
8928 /* this must work for VXLAN frames AND IPIP/SIT frames, and in
8929 * the case of IPIP frames, the transport header pointer is
8930 * after the inner header! So check to make sure that this
8931 * is a GRE or UDP_TUNNEL frame before doing that math.
8932 */
8933 if (gso && (skb_shinfo(skb)->gso_type &
8934 (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
8935 len = skb_inner_network_header(skb) -
8936 skb_transport_header(skb);
8937 if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
8938 goto out_rm_features;
8939 }
8940
8941 len = skb_inner_network_header_len(skb);
8942 if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
8943 goto out_rm_features;
8944 }
8945
8946 return features;
8947out_rm_features:
8948 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
8949}
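/* Worked example for the length checks above (values are illustrative, not
 * taken from this file): for a TSO frame carrying inner TCP over VXLAN with
 * IPv4 on both layers, skb_network_offset() is 14 (outer Ethernet header),
 * while skb_network_header_len() and skb_inner_network_header_len() are both
 * 20 (IPv4 headers without options). All are even and within the
 * MACLEN/IPLEN limits, so the offloads are kept; an odd or oversized header
 * length would instead clear NETIF_F_CSUM_MASK and NETIF_F_GSO_MASK via
 * out_rm_features.
 */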
8950
8951static const struct net_device_ops ice_netdev_safe_mode_ops = {
8952 .ndo_open = ice_open,
8953 .ndo_stop = ice_stop,
8954 .ndo_start_xmit = ice_start_xmit,
8955 .ndo_set_mac_address = ice_set_mac_address,
8956 .ndo_validate_addr = eth_validate_addr,
8957 .ndo_change_mtu = ice_change_mtu,
8958 .ndo_get_stats64 = ice_get_stats64,
8959 .ndo_tx_timeout = ice_tx_timeout,
8960 .ndo_bpf = ice_xdp_safe_mode,
8961};
8962
8963static const struct net_device_ops ice_netdev_ops = {
8964 .ndo_open = ice_open,
8965 .ndo_stop = ice_stop,
8966 .ndo_start_xmit = ice_start_xmit,
8967 .ndo_select_queue = ice_select_queue,
8968 .ndo_features_check = ice_features_check,
8969 .ndo_fix_features = ice_fix_features,
8970 .ndo_set_rx_mode = ice_set_rx_mode,
8971 .ndo_set_mac_address = ice_set_mac_address,
8972 .ndo_validate_addr = eth_validate_addr,
8973 .ndo_change_mtu = ice_change_mtu,
8974 .ndo_get_stats64 = ice_get_stats64,
8975 .ndo_set_tx_maxrate = ice_set_tx_maxrate,
8976 .ndo_eth_ioctl = ice_eth_ioctl,
8977 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
8978 .ndo_set_vf_mac = ice_set_vf_mac,
8979 .ndo_get_vf_config = ice_get_vf_cfg,
8980 .ndo_set_vf_trust = ice_set_vf_trust,
8981 .ndo_set_vf_vlan = ice_set_vf_port_vlan,
8982 .ndo_set_vf_link_state = ice_set_vf_link_state,
8983 .ndo_get_vf_stats = ice_get_vf_stats,
8984 .ndo_set_vf_rate = ice_set_vf_bw,
8985 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
8986 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
8987 .ndo_setup_tc = ice_setup_tc,
8988 .ndo_set_features = ice_set_features,
8989 .ndo_bridge_getlink = ice_bridge_getlink,
8990 .ndo_bridge_setlink = ice_bridge_setlink,
8991 .ndo_fdb_add = ice_fdb_add,
8992 .ndo_fdb_del = ice_fdb_del,
8993#ifdef CONFIG_RFS_ACCEL
8994 .ndo_rx_flow_steer = ice_rx_flow_steer,
8995#endif
8996 .ndo_tx_timeout = ice_tx_timeout,
8997 .ndo_bpf = ice_xdp,
8998 .ndo_xdp_xmit = ice_xdp_xmit,
8999 .ndo_xsk_wakeup = ice_xsk_wakeup,
9000 .ndo_get_devlink_port = ice_get_devlink_port,
9001};
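/* Which of the two ndo tables above a netdev ends up with is decided at
 * netdev configuration time, depending on whether the DDP package loaded
 * (safe mode uses the reduced table). A minimal sketch of that selection,
 * assuming a helper along these lines exists earlier in this file (the
 * helper name and signature here are illustrative):
 *
 *   static void ice_set_ops(struct ice_vsi *vsi)
 *   {
 *       struct net_device *netdev = vsi->netdev;
 *
 *       if (ice_is_safe_mode(vsi->back))
 *           netdev->netdev_ops = &ice_netdev_safe_mode_ops;
 *       else
 *           netdev->netdev_ops = &ice_netdev_ops;
 *   }
 */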