1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
4 #include <linux/ethtool.h>
5 #include <linux/printk.h>
6 #include <linux/dynamic_debug.h>
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/if_vlan.h>
10 #include <linux/rtnetlink.h>
11 #include <linux/interrupt.h>
12 #include <linux/pci.h>
13 #include <linux/cpumask.h>
14 #include <linux/crash_dump.h>
15 #include <linux/vmalloc.h>
18 #include "ionic_bus.h"
19 #include "ionic_dev.h"
20 #include "ionic_lif.h"
21 #include "ionic_txrx.h"
22 #include "ionic_ethtool.h"
23 #include "ionic_debugfs.h"
25 /* queuetype support level */
26 static const u8 ionic_qtype_versions
[IONIC_QTYPE_MAX
] = {
27 [IONIC_QTYPE_ADMINQ
] = 0, /* 0 = Base version with CQ support */
28 [IONIC_QTYPE_NOTIFYQ
] = 0, /* 0 = Base version */
29 [IONIC_QTYPE_RXQ
] = 2, /* 0 = Base version with CQ+SG support
30 * 2 = ... with CMB rings
32 [IONIC_QTYPE_TXQ
] = 3, /* 0 = Base version with CQ+SG support
33 * 1 = ... with Tx SG version 1
34 * 3 = ... with CMB rings
38 static void ionic_link_status_check(struct ionic_lif
*lif
);
39 static void ionic_lif_handle_fw_down(struct ionic_lif
*lif
);
40 static void ionic_lif_handle_fw_up(struct ionic_lif
*lif
);
41 static void ionic_lif_set_netdev_info(struct ionic_lif
*lif
);
43 static void ionic_txrx_deinit(struct ionic_lif
*lif
);
44 static int ionic_txrx_init(struct ionic_lif
*lif
);
45 static int ionic_start_queues(struct ionic_lif
*lif
);
46 static void ionic_stop_queues(struct ionic_lif
*lif
);
47 static void ionic_lif_queue_identify(struct ionic_lif
*lif
);
49 static int ionic_xdp_queues_config(struct ionic_lif
*lif
);
50 static void ionic_xdp_unregister_rxq_info(struct ionic_queue
*q
);
52 static void ionic_dim_work(struct work_struct
*work
)
54 struct dim
*dim
= container_of(work
, struct dim
, work
);
55 struct ionic_intr_info
*intr
;
56 struct dim_cq_moder cur_moder
;
57 struct ionic_qcq
*qcq
;
58 struct ionic_lif
*lif
;
61 cur_moder
= net_dim_get_rx_moderation(dim
->mode
, dim
->profile_ix
);
62 qcq
= container_of(dim
, struct ionic_qcq
, dim
);
64 new_coal
= ionic_coal_usec_to_hw(lif
->ionic
, cur_moder
.usec
);
65 new_coal
= new_coal
? new_coal
: 1;
68 if (intr
->dim_coal_hw
!= new_coal
) {
69 intr
->dim_coal_hw
= new_coal
;
71 ionic_intr_coal_init(lif
->ionic
->idev
.intr_ctrl
,
72 intr
->index
, intr
->dim_coal_hw
);
75 dim
->state
= DIM_START_MEASURE
;
78 static void ionic_lif_deferred_work(struct work_struct
*work
)
80 struct ionic_lif
*lif
= container_of(work
, struct ionic_lif
, deferred
.work
);
81 struct ionic_deferred
*def
= &lif
->deferred
;
82 struct ionic_deferred_work
*w
= NULL
;
85 spin_lock_bh(&def
->lock
);
86 if (!list_empty(&def
->list
)) {
87 w
= list_first_entry(&def
->list
,
88 struct ionic_deferred_work
, list
);
91 spin_unlock_bh(&def
->lock
);
97 case IONIC_DW_TYPE_RX_MODE
:
98 ionic_lif_rx_mode(lif
);
100 case IONIC_DW_TYPE_LINK_STATUS
:
101 ionic_link_status_check(lif
);
103 case IONIC_DW_TYPE_LIF_RESET
:
105 ionic_lif_handle_fw_up(lif
);
107 ionic_lif_handle_fw_down(lif
);
109 /* Fire off another watchdog to see
110 * if the FW is already back rather than
111 * waiting another whole cycle
113 mod_timer(&lif
->ionic
->watchdog_timer
, jiffies
+ 1);
124 void ionic_lif_deferred_enqueue(struct ionic_deferred
*def
,
125 struct ionic_deferred_work
*work
)
127 spin_lock_bh(&def
->lock
);
128 list_add_tail(&work
->list
, &def
->list
);
129 spin_unlock_bh(&def
->lock
);
130 schedule_work(&def
->work
);
133 static void ionic_link_status_check(struct ionic_lif
*lif
)
135 struct net_device
*netdev
= lif
->netdev
;
139 if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED
, lif
->state
))
142 /* Don't put carrier back up if we're in a broken state */
143 if (test_bit(IONIC_LIF_F_BROKEN
, lif
->state
)) {
144 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED
, lif
->state
);
148 link_status
= le16_to_cpu(lif
->info
->status
.link_status
);
149 link_up
= link_status
== IONIC_PORT_OPER_STATUS_UP
;
154 if (netdev
->flags
& IFF_UP
&& netif_running(netdev
)) {
155 mutex_lock(&lif
->queue_lock
);
156 err
= ionic_start_queues(lif
);
157 if (err
&& err
!= -EBUSY
) {
159 "Failed to start queues: %d\n", err
);
160 set_bit(IONIC_LIF_F_BROKEN
, lif
->state
);
161 netif_carrier_off(lif
->netdev
);
163 mutex_unlock(&lif
->queue_lock
);
166 if (!err
&& !netif_carrier_ok(netdev
)) {
167 ionic_port_identify(lif
->ionic
);
168 netdev_info(netdev
, "Link up - %d Gbps\n",
169 le32_to_cpu(lif
->info
->status
.link_speed
) / 1000);
170 netif_carrier_on(netdev
);
173 if (netif_carrier_ok(netdev
)) {
174 lif
->link_down_count
++;
175 netdev_info(netdev
, "Link down\n");
176 netif_carrier_off(netdev
);
179 if (netdev
->flags
& IFF_UP
&& netif_running(netdev
)) {
180 mutex_lock(&lif
->queue_lock
);
181 ionic_stop_queues(lif
);
182 mutex_unlock(&lif
->queue_lock
);
186 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED
, lif
->state
);
189 void ionic_link_status_check_request(struct ionic_lif
*lif
, bool can_sleep
)
191 struct ionic_deferred_work
*work
;
193 /* we only need one request outstanding at a time */
194 if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED
, lif
->state
))
198 work
= kzalloc(sizeof(*work
), GFP_ATOMIC
);
200 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED
, lif
->state
);
204 work
->type
= IONIC_DW_TYPE_LINK_STATUS
;
205 ionic_lif_deferred_enqueue(&lif
->deferred
, work
);
207 ionic_link_status_check(lif
);
211 static void ionic_napi_deadline(struct timer_list
*timer
)
213 struct ionic_qcq
*qcq
= container_of(timer
, struct ionic_qcq
, napi_deadline
);
215 napi_schedule(&qcq
->napi
);
218 static irqreturn_t
ionic_isr(int irq
, void *data
)
220 struct napi_struct
*napi
= data
;
222 napi_schedule_irqoff(napi
);
227 static int ionic_request_irq(struct ionic_lif
*lif
, struct ionic_qcq
*qcq
)
229 struct ionic_intr_info
*intr
= &qcq
->intr
;
230 struct device
*dev
= lif
->ionic
->dev
;
231 struct ionic_queue
*q
= &qcq
->q
;
235 name
= lif
->netdev
->name
;
237 name
= dev_name(dev
);
239 snprintf(intr
->name
, sizeof(intr
->name
),
240 "%s-%s-%s", IONIC_DRV_NAME
, name
, q
->name
);
242 return devm_request_irq(dev
, intr
->vector
, ionic_isr
,
243 0, intr
->name
, &qcq
->napi
);
246 static int ionic_intr_alloc(struct ionic_lif
*lif
, struct ionic_intr_info
*intr
)
248 struct ionic
*ionic
= lif
->ionic
;
251 index
= find_first_zero_bit(ionic
->intrs
, ionic
->nintrs
);
252 if (index
== ionic
->nintrs
) {
253 netdev_warn(lif
->netdev
, "%s: no intr, index=%d nintrs=%d\n",
254 __func__
, index
, ionic
->nintrs
);
258 set_bit(index
, ionic
->intrs
);
259 ionic_intr_init(&ionic
->idev
, intr
, index
);
264 static void ionic_intr_free(struct ionic
*ionic
, int index
)
266 if (index
!= IONIC_INTR_INDEX_NOT_ASSIGNED
&& index
< ionic
->nintrs
)
267 clear_bit(index
, ionic
->intrs
);
270 static int ionic_qcq_enable(struct ionic_qcq
*qcq
)
272 struct ionic_queue
*q
= &qcq
->q
;
273 struct ionic_lif
*lif
= q
->lif
;
274 struct ionic_dev
*idev
;
277 struct ionic_admin_ctx ctx
= {
278 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
280 .opcode
= IONIC_CMD_Q_CONTROL
,
281 .lif_index
= cpu_to_le16(lif
->index
),
283 .index
= cpu_to_le32(q
->index
),
284 .oper
= IONIC_Q_ENABLE
,
289 idev
= &lif
->ionic
->idev
;
290 dev
= lif
->ionic
->dev
;
292 dev_dbg(dev
, "q_enable.index %d q_enable.qtype %d\n",
293 ctx
.cmd
.q_control
.index
, ctx
.cmd
.q_control
.type
);
295 if (qcq
->flags
& IONIC_QCQ_F_INTR
)
296 ionic_intr_clean(idev
->intr_ctrl
, qcq
->intr
.index
);
298 ret
= ionic_adminq_post_wait(lif
, &ctx
);
303 napi_enable(&qcq
->napi
);
305 if (qcq
->flags
& IONIC_QCQ_F_INTR
) {
306 irq_set_affinity_hint(qcq
->intr
.vector
,
307 &qcq
->intr
.affinity_mask
);
308 ionic_intr_mask(idev
->intr_ctrl
, qcq
->intr
.index
,
309 IONIC_INTR_MASK_CLEAR
);
315 static int ionic_qcq_disable(struct ionic_lif
*lif
, struct ionic_qcq
*qcq
, int fw_err
)
317 struct ionic_queue
*q
;
319 struct ionic_admin_ctx ctx
= {
320 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
322 .opcode
= IONIC_CMD_Q_CONTROL
,
323 .oper
= IONIC_Q_DISABLE
,
328 netdev_err(lif
->netdev
, "%s: bad qcq\n", __func__
);
334 if (qcq
->flags
& IONIC_QCQ_F_INTR
) {
335 struct ionic_dev
*idev
= &lif
->ionic
->idev
;
337 cancel_work_sync(&qcq
->dim
.work
);
338 ionic_intr_mask(idev
->intr_ctrl
, qcq
->intr
.index
,
339 IONIC_INTR_MASK_SET
);
340 synchronize_irq(qcq
->intr
.vector
);
341 irq_set_affinity_hint(qcq
->intr
.vector
, NULL
);
342 napi_disable(&qcq
->napi
);
343 del_timer_sync(&qcq
->napi_deadline
);
346 /* If there was a previous fw communcation error, don't bother with
347 * sending the adminq command and just return the same error value.
349 if (fw_err
== -ETIMEDOUT
|| fw_err
== -ENXIO
)
352 ctx
.cmd
.q_control
.lif_index
= cpu_to_le16(lif
->index
);
353 ctx
.cmd
.q_control
.type
= q
->type
;
354 ctx
.cmd
.q_control
.index
= cpu_to_le32(q
->index
);
355 dev_dbg(lif
->ionic
->dev
, "q_disable.index %d q_disable.qtype %d\n",
356 ctx
.cmd
.q_control
.index
, ctx
.cmd
.q_control
.type
);
358 return ionic_adminq_post_wait(lif
, &ctx
);
361 static void ionic_lif_qcq_deinit(struct ionic_lif
*lif
, struct ionic_qcq
*qcq
)
363 struct ionic_dev
*idev
= &lif
->ionic
->idev
;
368 if (!(qcq
->flags
& IONIC_QCQ_F_INITED
))
371 if (qcq
->flags
& IONIC_QCQ_F_INTR
) {
372 ionic_intr_mask(idev
->intr_ctrl
, qcq
->intr
.index
,
373 IONIC_INTR_MASK_SET
);
374 netif_napi_del(&qcq
->napi
);
377 qcq
->flags
&= ~IONIC_QCQ_F_INITED
;
380 static void ionic_qcq_intr_free(struct ionic_lif
*lif
, struct ionic_qcq
*qcq
)
382 if (!(qcq
->flags
& IONIC_QCQ_F_INTR
) || qcq
->intr
.vector
== 0)
385 irq_set_affinity_hint(qcq
->intr
.vector
, NULL
);
386 devm_free_irq(lif
->ionic
->dev
, qcq
->intr
.vector
, &qcq
->napi
);
387 qcq
->intr
.vector
= 0;
388 ionic_intr_free(lif
->ionic
, qcq
->intr
.index
);
389 qcq
->intr
.index
= IONIC_INTR_INDEX_NOT_ASSIGNED
;
392 static void ionic_qcq_free(struct ionic_lif
*lif
, struct ionic_qcq
*qcq
)
394 struct device
*dev
= lif
->ionic
->dev
;
399 ionic_debugfs_del_qcq(qcq
);
402 dma_free_coherent(dev
, qcq
->q_size
, qcq
->q_base
, qcq
->q_base_pa
);
407 if (qcq
->cmb_q_base
) {
408 iounmap(qcq
->cmb_q_base
);
409 ionic_put_cmb(lif
, qcq
->cmb_pgid
, qcq
->cmb_order
);
412 qcq
->cmb_q_base
= NULL
;
413 qcq
->cmb_q_base_pa
= 0;
417 dma_free_coherent(dev
, qcq
->cq_size
, qcq
->cq_base
, qcq
->cq_base_pa
);
423 dma_free_coherent(dev
, qcq
->sg_size
, qcq
->sg_base
, qcq
->sg_base_pa
);
428 ionic_xdp_unregister_rxq_info(&qcq
->q
);
429 ionic_qcq_intr_free(lif
, qcq
);
437 void ionic_qcqs_free(struct ionic_lif
*lif
)
439 struct device
*dev
= lif
->ionic
->dev
;
440 struct ionic_qcq
*adminqcq
;
441 unsigned long irqflags
;
443 if (lif
->notifyqcq
) {
444 ionic_qcq_free(lif
, lif
->notifyqcq
);
445 devm_kfree(dev
, lif
->notifyqcq
);
446 lif
->notifyqcq
= NULL
;
450 spin_lock_irqsave(&lif
->adminq_lock
, irqflags
);
451 adminqcq
= READ_ONCE(lif
->adminqcq
);
452 lif
->adminqcq
= NULL
;
453 spin_unlock_irqrestore(&lif
->adminq_lock
, irqflags
);
455 ionic_qcq_free(lif
, adminqcq
);
456 devm_kfree(dev
, adminqcq
);
461 devm_kfree(dev
, lif
->rxqstats
);
462 lif
->rxqstats
= NULL
;
463 devm_kfree(dev
, lif
->rxqcqs
);
468 devm_kfree(dev
, lif
->txqstats
);
469 lif
->txqstats
= NULL
;
470 devm_kfree(dev
, lif
->txqcqs
);
475 static void ionic_link_qcq_interrupts(struct ionic_qcq
*src_qcq
,
476 struct ionic_qcq
*n_qcq
)
478 n_qcq
->intr
.vector
= src_qcq
->intr
.vector
;
479 n_qcq
->intr
.index
= src_qcq
->intr
.index
;
480 n_qcq
->napi_qcq
= src_qcq
->napi_qcq
;
483 static int ionic_alloc_qcq_interrupt(struct ionic_lif
*lif
, struct ionic_qcq
*qcq
)
487 if (!(qcq
->flags
& IONIC_QCQ_F_INTR
)) {
488 qcq
->intr
.index
= IONIC_INTR_INDEX_NOT_ASSIGNED
;
492 err
= ionic_intr_alloc(lif
, &qcq
->intr
);
494 netdev_warn(lif
->netdev
, "no intr for %s: %d\n",
499 err
= ionic_bus_get_irq(lif
->ionic
, qcq
->intr
.index
);
501 netdev_warn(lif
->netdev
, "no vector for %s: %d\n",
503 goto err_out_free_intr
;
505 qcq
->intr
.vector
= err
;
506 ionic_intr_mask_assert(lif
->ionic
->idev
.intr_ctrl
, qcq
->intr
.index
,
507 IONIC_INTR_MASK_SET
);
509 err
= ionic_request_irq(lif
, qcq
);
511 netdev_warn(lif
->netdev
, "irq request failed %d\n", err
);
512 goto err_out_free_intr
;
515 /* try to get the irq on the local numa node first */
516 qcq
->intr
.cpu
= cpumask_local_spread(qcq
->intr
.index
,
517 dev_to_node(lif
->ionic
->dev
));
518 if (qcq
->intr
.cpu
!= -1)
519 cpumask_set_cpu(qcq
->intr
.cpu
, &qcq
->intr
.affinity_mask
);
521 netdev_dbg(lif
->netdev
, "%s: Interrupt index %d\n", qcq
->q
.name
, qcq
->intr
.index
);
525 ionic_intr_free(lif
->ionic
, qcq
->intr
.index
);
530 static int ionic_qcq_alloc(struct ionic_lif
*lif
, unsigned int type
,
532 const char *name
, unsigned int flags
,
533 unsigned int num_descs
, unsigned int desc_size
,
534 unsigned int cq_desc_size
,
535 unsigned int sg_desc_size
,
536 unsigned int pid
, struct ionic_qcq
**qcq
)
538 struct ionic_dev
*idev
= &lif
->ionic
->idev
;
539 struct device
*dev
= lif
->ionic
->dev
;
540 void *q_base
, *cq_base
, *sg_base
;
541 dma_addr_t cq_base_pa
= 0;
542 dma_addr_t sg_base_pa
= 0;
543 dma_addr_t q_base_pa
= 0;
544 struct ionic_qcq
*new;
549 new = devm_kzalloc(dev
, sizeof(*new), GFP_KERNEL
);
551 netdev_err(lif
->netdev
, "Cannot allocate queue structure\n");
559 new->q
.info
= vcalloc(num_descs
, sizeof(*new->q
.info
));
561 netdev_err(lif
->netdev
, "Cannot allocate queue info\n");
563 goto err_out_free_qcq
;
567 new->q
.max_sg_elems
= lif
->qtype_info
[type
].max_sg_elems
;
569 err
= ionic_q_init(lif
, idev
, &new->q
, index
, name
, num_descs
,
570 desc_size
, sg_desc_size
, pid
);
572 netdev_err(lif
->netdev
, "Cannot initialize queue\n");
573 goto err_out_free_q_info
;
576 err
= ionic_alloc_qcq_interrupt(lif
, new);
580 new->cq
.info
= vcalloc(num_descs
, sizeof(*new->cq
.info
));
582 netdev_err(lif
->netdev
, "Cannot allocate completion queue info\n");
584 goto err_out_free_irq
;
587 err
= ionic_cq_init(lif
, &new->cq
, &new->intr
, num_descs
, cq_desc_size
);
589 netdev_err(lif
->netdev
, "Cannot initialize completion queue\n");
590 goto err_out_free_cq_info
;
593 if (flags
& IONIC_QCQ_F_NOTIFYQ
) {
596 /* q & cq need to be contiguous in NotifyQ, so alloc it all in q
597 * and don't alloc qc. We leave new->qc_size and new->qc_base
598 * as 0 to be sure we don't try to free it later.
600 q_size
= ALIGN(num_descs
* desc_size
, PAGE_SIZE
);
601 new->q_size
= PAGE_SIZE
+ q_size
+
602 ALIGN(num_descs
* cq_desc_size
, PAGE_SIZE
);
603 new->q_base
= dma_alloc_coherent(dev
, new->q_size
,
604 &new->q_base_pa
, GFP_KERNEL
);
606 netdev_err(lif
->netdev
, "Cannot allocate qcq DMA memory\n");
608 goto err_out_free_cq_info
;
610 q_base
= PTR_ALIGN(new->q_base
, PAGE_SIZE
);
611 q_base_pa
= ALIGN(new->q_base_pa
, PAGE_SIZE
);
612 ionic_q_map(&new->q
, q_base
, q_base_pa
);
614 cq_base
= PTR_ALIGN(q_base
+ q_size
, PAGE_SIZE
);
615 cq_base_pa
= ALIGN(new->q_base_pa
+ q_size
, PAGE_SIZE
);
616 ionic_cq_map(&new->cq
, cq_base
, cq_base_pa
);
617 ionic_cq_bind(&new->cq
, &new->q
);
619 /* regular DMA q descriptors */
620 new->q_size
= PAGE_SIZE
+ (num_descs
* desc_size
);
621 new->q_base
= dma_alloc_coherent(dev
, new->q_size
, &new->q_base_pa
,
624 netdev_err(lif
->netdev
, "Cannot allocate queue DMA memory\n");
626 goto err_out_free_cq_info
;
628 q_base
= PTR_ALIGN(new->q_base
, PAGE_SIZE
);
629 q_base_pa
= ALIGN(new->q_base_pa
, PAGE_SIZE
);
630 ionic_q_map(&new->q
, q_base
, q_base_pa
);
632 if (flags
& IONIC_QCQ_F_CMB_RINGS
) {
633 /* on-chip CMB q descriptors */
634 new->cmb_q_size
= num_descs
* desc_size
;
635 new->cmb_order
= order_base_2(new->cmb_q_size
/ PAGE_SIZE
);
637 err
= ionic_get_cmb(lif
, &new->cmb_pgid
, &new->cmb_q_base_pa
,
640 netdev_err(lif
->netdev
,
641 "Cannot allocate queue order %d from cmb: err %d\n",
642 new->cmb_order
, err
);
646 new->cmb_q_base
= ioremap_wc(new->cmb_q_base_pa
, new->cmb_q_size
);
647 if (!new->cmb_q_base
) {
648 netdev_err(lif
->netdev
, "Cannot map queue from cmb\n");
649 ionic_put_cmb(lif
, new->cmb_pgid
, new->cmb_order
);
654 new->cmb_q_base_pa
-= idev
->phy_cmb_pages
;
655 ionic_q_cmb_map(&new->q
, new->cmb_q_base
, new->cmb_q_base_pa
);
658 /* cq DMA descriptors */
659 new->cq_size
= PAGE_SIZE
+ (num_descs
* cq_desc_size
);
660 new->cq_base
= dma_alloc_coherent(dev
, new->cq_size
, &new->cq_base_pa
,
663 netdev_err(lif
->netdev
, "Cannot allocate cq DMA memory\n");
667 cq_base
= PTR_ALIGN(new->cq_base
, PAGE_SIZE
);
668 cq_base_pa
= ALIGN(new->cq_base_pa
, PAGE_SIZE
);
669 ionic_cq_map(&new->cq
, cq_base
, cq_base_pa
);
670 ionic_cq_bind(&new->cq
, &new->q
);
673 if (flags
& IONIC_QCQ_F_SG
) {
674 new->sg_size
= PAGE_SIZE
+ (num_descs
* sg_desc_size
);
675 new->sg_base
= dma_alloc_coherent(dev
, new->sg_size
, &new->sg_base_pa
,
678 netdev_err(lif
->netdev
, "Cannot allocate sg DMA memory\n");
680 goto err_out_free_cq
;
682 sg_base
= PTR_ALIGN(new->sg_base
, PAGE_SIZE
);
683 sg_base_pa
= ALIGN(new->sg_base_pa
, PAGE_SIZE
);
684 ionic_q_sg_map(&new->q
, sg_base
, sg_base_pa
);
687 INIT_WORK(&new->dim
.work
, ionic_dim_work
);
688 new->dim
.mode
= DIM_CQ_PERIOD_MODE_START_FROM_EQE
;
695 dma_free_coherent(dev
, new->cq_size
, new->cq_base
, new->cq_base_pa
);
697 if (new->cmb_q_base
) {
698 iounmap(new->cmb_q_base
);
699 ionic_put_cmb(lif
, new->cmb_pgid
, new->cmb_order
);
701 dma_free_coherent(dev
, new->q_size
, new->q_base
, new->q_base_pa
);
702 err_out_free_cq_info
:
705 if (flags
& IONIC_QCQ_F_INTR
) {
706 devm_free_irq(dev
, new->intr
.vector
, &new->napi
);
707 ionic_intr_free(lif
->ionic
, new->intr
.index
);
712 devm_kfree(dev
, new);
714 dev_err(dev
, "qcq alloc of %s%d failed %d\n", name
, index
, err
);
718 static int ionic_qcqs_alloc(struct ionic_lif
*lif
)
720 struct device
*dev
= lif
->ionic
->dev
;
724 flags
= IONIC_QCQ_F_INTR
;
725 err
= ionic_qcq_alloc(lif
, IONIC_QTYPE_ADMINQ
, 0, "admin", flags
,
727 sizeof(struct ionic_admin_cmd
),
728 sizeof(struct ionic_admin_comp
),
729 0, lif
->kern_pid
, &lif
->adminqcq
);
732 ionic_debugfs_add_qcq(lif
, lif
->adminqcq
);
734 if (lif
->ionic
->nnqs_per_lif
) {
735 flags
= IONIC_QCQ_F_NOTIFYQ
;
736 err
= ionic_qcq_alloc(lif
, IONIC_QTYPE_NOTIFYQ
, 0, "notifyq",
737 flags
, IONIC_NOTIFYQ_LENGTH
,
738 sizeof(struct ionic_notifyq_cmd
),
739 sizeof(union ionic_notifyq_comp
),
740 0, lif
->kern_pid
, &lif
->notifyqcq
);
743 ionic_debugfs_add_qcq(lif
, lif
->notifyqcq
);
745 /* Let the notifyq ride on the adminq interrupt */
746 ionic_link_qcq_interrupts(lif
->adminqcq
, lif
->notifyqcq
);
750 lif
->txqcqs
= devm_kcalloc(dev
, lif
->ionic
->ntxqs_per_lif
,
751 sizeof(*lif
->txqcqs
), GFP_KERNEL
);
754 lif
->rxqcqs
= devm_kcalloc(dev
, lif
->ionic
->nrxqs_per_lif
,
755 sizeof(*lif
->rxqcqs
), GFP_KERNEL
);
759 lif
->txqstats
= devm_kcalloc(dev
, lif
->ionic
->ntxqs_per_lif
+ 1,
760 sizeof(*lif
->txqstats
), GFP_KERNEL
);
763 lif
->rxqstats
= devm_kcalloc(dev
, lif
->ionic
->nrxqs_per_lif
+ 1,
764 sizeof(*lif
->rxqstats
), GFP_KERNEL
);
771 ionic_qcqs_free(lif
);
775 static void ionic_qcq_sanitize(struct ionic_qcq
*qcq
)
779 qcq
->cq
.tail_idx
= 0;
780 qcq
->cq
.done_color
= 1;
781 memset(qcq
->q_base
, 0, qcq
->q_size
);
783 memset_io(qcq
->cmb_q_base
, 0, qcq
->cmb_q_size
);
784 memset(qcq
->cq_base
, 0, qcq
->cq_size
);
785 memset(qcq
->sg_base
, 0, qcq
->sg_size
);
788 static int ionic_lif_txq_init(struct ionic_lif
*lif
, struct ionic_qcq
*qcq
)
790 struct device
*dev
= lif
->ionic
->dev
;
791 struct ionic_queue
*q
= &qcq
->q
;
792 struct ionic_cq
*cq
= &qcq
->cq
;
793 struct ionic_admin_ctx ctx
= {
794 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
796 .opcode
= IONIC_CMD_Q_INIT
,
797 .lif_index
= cpu_to_le16(lif
->index
),
799 .ver
= lif
->qtype_info
[q
->type
].version
,
800 .index
= cpu_to_le32(q
->index
),
801 .flags
= cpu_to_le16(IONIC_QINIT_F_IRQ
|
803 .intr_index
= cpu_to_le16(qcq
->intr
.index
),
804 .pid
= cpu_to_le16(q
->pid
),
805 .ring_size
= ilog2(q
->num_descs
),
806 .ring_base
= cpu_to_le64(q
->base_pa
),
807 .cq_ring_base
= cpu_to_le64(cq
->base_pa
),
808 .sg_ring_base
= cpu_to_le64(q
->sg_base_pa
),
809 .features
= cpu_to_le64(q
->features
),
814 if (qcq
->flags
& IONIC_QCQ_F_CMB_RINGS
) {
815 ctx
.cmd
.q_init
.flags
|= cpu_to_le16(IONIC_QINIT_F_CMB
);
816 ctx
.cmd
.q_init
.ring_base
= cpu_to_le64(qcq
->cmb_q_base_pa
);
819 dev_dbg(dev
, "txq_init.pid %d\n", ctx
.cmd
.q_init
.pid
);
820 dev_dbg(dev
, "txq_init.index %d\n", ctx
.cmd
.q_init
.index
);
821 dev_dbg(dev
, "txq_init.ring_base 0x%llx\n", ctx
.cmd
.q_init
.ring_base
);
822 dev_dbg(dev
, "txq_init.ring_size %d\n", ctx
.cmd
.q_init
.ring_size
);
823 dev_dbg(dev
, "txq_init.cq_ring_base 0x%llx\n", ctx
.cmd
.q_init
.cq_ring_base
);
824 dev_dbg(dev
, "txq_init.sg_ring_base 0x%llx\n", ctx
.cmd
.q_init
.sg_ring_base
);
825 dev_dbg(dev
, "txq_init.flags 0x%x\n", ctx
.cmd
.q_init
.flags
);
826 dev_dbg(dev
, "txq_init.ver %d\n", ctx
.cmd
.q_init
.ver
);
827 dev_dbg(dev
, "txq_init.intr_index %d\n", ctx
.cmd
.q_init
.intr_index
);
829 ionic_qcq_sanitize(qcq
);
831 err
= ionic_adminq_post_wait(lif
, &ctx
);
835 q
->hw_type
= ctx
.comp
.q_init
.hw_type
;
836 q
->hw_index
= le32_to_cpu(ctx
.comp
.q_init
.hw_index
);
837 q
->dbval
= IONIC_DBELL_QID(q
->hw_index
);
839 dev_dbg(dev
, "txq->hw_type %d\n", q
->hw_type
);
840 dev_dbg(dev
, "txq->hw_index %d\n", q
->hw_index
);
842 q
->dbell_deadline
= IONIC_TX_DOORBELL_DEADLINE
;
843 q
->dbell_jiffies
= jiffies
;
845 if (test_bit(IONIC_LIF_F_SPLIT_INTR
, lif
->state
)) {
846 netif_napi_add(lif
->netdev
, &qcq
->napi
, ionic_tx_napi
);
848 timer_setup(&qcq
->napi_deadline
, ionic_napi_deadline
, 0);
851 qcq
->flags
|= IONIC_QCQ_F_INITED
;
856 static int ionic_lif_rxq_init(struct ionic_lif
*lif
, struct ionic_qcq
*qcq
)
858 struct device
*dev
= lif
->ionic
->dev
;
859 struct ionic_queue
*q
= &qcq
->q
;
860 struct ionic_cq
*cq
= &qcq
->cq
;
861 struct ionic_admin_ctx ctx
= {
862 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
864 .opcode
= IONIC_CMD_Q_INIT
,
865 .lif_index
= cpu_to_le16(lif
->index
),
867 .ver
= lif
->qtype_info
[q
->type
].version
,
868 .index
= cpu_to_le32(q
->index
),
869 .flags
= cpu_to_le16(IONIC_QINIT_F_IRQ
),
870 .intr_index
= cpu_to_le16(cq
->bound_intr
->index
),
871 .pid
= cpu_to_le16(q
->pid
),
872 .ring_size
= ilog2(q
->num_descs
),
873 .ring_base
= cpu_to_le64(q
->base_pa
),
874 .cq_ring_base
= cpu_to_le64(cq
->base_pa
),
875 .sg_ring_base
= cpu_to_le64(q
->sg_base_pa
),
876 .features
= cpu_to_le64(q
->features
),
881 q
->partner
= &lif
->txqcqs
[q
->index
]->q
;
882 q
->partner
->partner
= q
;
884 if (!lif
->xdp_prog
||
885 (lif
->xdp_prog
->aux
&& lif
->xdp_prog
->aux
->xdp_has_frags
))
886 ctx
.cmd
.q_init
.flags
|= cpu_to_le16(IONIC_QINIT_F_SG
);
888 if (qcq
->flags
& IONIC_QCQ_F_CMB_RINGS
) {
889 ctx
.cmd
.q_init
.flags
|= cpu_to_le16(IONIC_QINIT_F_CMB
);
890 ctx
.cmd
.q_init
.ring_base
= cpu_to_le64(qcq
->cmb_q_base_pa
);
893 dev_dbg(dev
, "rxq_init.pid %d\n", ctx
.cmd
.q_init
.pid
);
894 dev_dbg(dev
, "rxq_init.index %d\n", ctx
.cmd
.q_init
.index
);
895 dev_dbg(dev
, "rxq_init.ring_base 0x%llx\n", ctx
.cmd
.q_init
.ring_base
);
896 dev_dbg(dev
, "rxq_init.ring_size %d\n", ctx
.cmd
.q_init
.ring_size
);
897 dev_dbg(dev
, "rxq_init.flags 0x%x\n", ctx
.cmd
.q_init
.flags
);
898 dev_dbg(dev
, "rxq_init.ver %d\n", ctx
.cmd
.q_init
.ver
);
899 dev_dbg(dev
, "rxq_init.intr_index %d\n", ctx
.cmd
.q_init
.intr_index
);
901 ionic_qcq_sanitize(qcq
);
903 err
= ionic_adminq_post_wait(lif
, &ctx
);
907 q
->hw_type
= ctx
.comp
.q_init
.hw_type
;
908 q
->hw_index
= le32_to_cpu(ctx
.comp
.q_init
.hw_index
);
909 q
->dbval
= IONIC_DBELL_QID(q
->hw_index
);
911 dev_dbg(dev
, "rxq->hw_type %d\n", q
->hw_type
);
912 dev_dbg(dev
, "rxq->hw_index %d\n", q
->hw_index
);
914 q
->dbell_deadline
= IONIC_RX_MIN_DOORBELL_DEADLINE
;
915 q
->dbell_jiffies
= jiffies
;
917 if (test_bit(IONIC_LIF_F_SPLIT_INTR
, lif
->state
))
918 netif_napi_add(lif
->netdev
, &qcq
->napi
, ionic_rx_napi
);
920 netif_napi_add(lif
->netdev
, &qcq
->napi
, ionic_txrx_napi
);
923 timer_setup(&qcq
->napi_deadline
, ionic_napi_deadline
, 0);
925 qcq
->flags
|= IONIC_QCQ_F_INITED
;
930 int ionic_lif_create_hwstamp_txq(struct ionic_lif
*lif
)
932 unsigned int num_desc
, desc_sz
, comp_sz
, sg_desc_sz
;
933 unsigned int txq_i
, flags
;
934 struct ionic_qcq
*txq
;
938 if (lif
->hwstamp_txq
)
941 features
= IONIC_Q_F_2X_CQ_DESC
| IONIC_TXQ_F_HWSTAMP
;
943 num_desc
= IONIC_MIN_TXRX_DESC
;
944 desc_sz
= sizeof(struct ionic_txq_desc
);
945 comp_sz
= 2 * sizeof(struct ionic_txq_comp
);
947 if (lif
->qtype_info
[IONIC_QTYPE_TXQ
].version
>= 1 &&
948 lif
->qtype_info
[IONIC_QTYPE_TXQ
].sg_desc_sz
== sizeof(struct ionic_txq_sg_desc_v1
))
949 sg_desc_sz
= sizeof(struct ionic_txq_sg_desc_v1
);
951 sg_desc_sz
= sizeof(struct ionic_txq_sg_desc
);
953 txq_i
= lif
->ionic
->ntxqs_per_lif
;
954 flags
= IONIC_QCQ_F_TX_STATS
| IONIC_QCQ_F_SG
;
956 err
= ionic_qcq_alloc(lif
, IONIC_QTYPE_TXQ
, txq_i
, "hwstamp_tx", flags
,
957 num_desc
, desc_sz
, comp_sz
, sg_desc_sz
,
958 lif
->kern_pid
, &txq
);
962 txq
->q
.features
= features
;
964 ionic_link_qcq_interrupts(lif
->adminqcq
, txq
);
965 ionic_debugfs_add_qcq(lif
, txq
);
967 lif
->hwstamp_txq
= txq
;
969 if (netif_running(lif
->netdev
)) {
970 err
= ionic_lif_txq_init(lif
, txq
);
974 if (test_bit(IONIC_LIF_F_UP
, lif
->state
)) {
975 err
= ionic_qcq_enable(txq
);
984 ionic_lif_qcq_deinit(lif
, txq
);
986 lif
->hwstamp_txq
= NULL
;
987 ionic_debugfs_del_qcq(txq
);
988 ionic_qcq_free(lif
, txq
);
989 devm_kfree(lif
->ionic
->dev
, txq
);
994 int ionic_lif_create_hwstamp_rxq(struct ionic_lif
*lif
)
996 unsigned int num_desc
, desc_sz
, comp_sz
, sg_desc_sz
;
997 unsigned int rxq_i
, flags
;
998 struct ionic_qcq
*rxq
;
1002 if (lif
->hwstamp_rxq
)
1005 features
= IONIC_Q_F_2X_CQ_DESC
| IONIC_RXQ_F_HWSTAMP
;
1007 num_desc
= IONIC_MIN_TXRX_DESC
;
1008 desc_sz
= sizeof(struct ionic_rxq_desc
);
1009 comp_sz
= 2 * sizeof(struct ionic_rxq_comp
);
1010 sg_desc_sz
= sizeof(struct ionic_rxq_sg_desc
);
1012 rxq_i
= lif
->ionic
->nrxqs_per_lif
;
1013 flags
= IONIC_QCQ_F_RX_STATS
| IONIC_QCQ_F_SG
;
1015 err
= ionic_qcq_alloc(lif
, IONIC_QTYPE_RXQ
, rxq_i
, "hwstamp_rx", flags
,
1016 num_desc
, desc_sz
, comp_sz
, sg_desc_sz
,
1017 lif
->kern_pid
, &rxq
);
1021 rxq
->q
.features
= features
;
1023 ionic_link_qcq_interrupts(lif
->adminqcq
, rxq
);
1024 ionic_debugfs_add_qcq(lif
, rxq
);
1026 lif
->hwstamp_rxq
= rxq
;
1028 if (netif_running(lif
->netdev
)) {
1029 err
= ionic_lif_rxq_init(lif
, rxq
);
1033 if (test_bit(IONIC_LIF_F_UP
, lif
->state
)) {
1034 ionic_rx_fill(&rxq
->q
);
1035 err
= ionic_qcq_enable(rxq
);
1037 goto err_qcq_enable
;
1044 ionic_lif_qcq_deinit(lif
, rxq
);
1046 lif
->hwstamp_rxq
= NULL
;
1047 ionic_debugfs_del_qcq(rxq
);
1048 ionic_qcq_free(lif
, rxq
);
1049 devm_kfree(lif
->ionic
->dev
, rxq
);
1054 int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif
*lif
, bool rx_all
)
1056 struct ionic_queue_params qparam
;
1058 ionic_init_queue_params(lif
, &qparam
);
1061 qparam
.rxq_features
= IONIC_Q_F_2X_CQ_DESC
| IONIC_RXQ_F_HWSTAMP
;
1063 qparam
.rxq_features
= 0;
1065 /* if we're not running, just set the values and return */
1066 if (!netif_running(lif
->netdev
)) {
1067 lif
->rxq_features
= qparam
.rxq_features
;
1071 return ionic_reconfigure_queues(lif
, &qparam
);
1074 int ionic_lif_set_hwstamp_txmode(struct ionic_lif
*lif
, u16 txstamp_mode
)
1076 struct ionic_admin_ctx ctx
= {
1077 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
1078 .cmd
.lif_setattr
= {
1079 .opcode
= IONIC_CMD_LIF_SETATTR
,
1080 .index
= cpu_to_le16(lif
->index
),
1081 .attr
= IONIC_LIF_ATTR_TXSTAMP
,
1082 .txstamp_mode
= cpu_to_le16(txstamp_mode
),
1086 return ionic_adminq_post_wait(lif
, &ctx
);
1089 static void ionic_lif_del_hwstamp_rxfilt(struct ionic_lif
*lif
)
1091 struct ionic_admin_ctx ctx
= {
1092 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
1093 .cmd
.rx_filter_del
= {
1094 .opcode
= IONIC_CMD_RX_FILTER_DEL
,
1095 .lif_index
= cpu_to_le16(lif
->index
),
1098 struct ionic_rx_filter
*f
;
1102 spin_lock_bh(&lif
->rx_filters
.lock
);
1104 f
= ionic_rx_filter_rxsteer(lif
);
1106 spin_unlock_bh(&lif
->rx_filters
.lock
);
1110 filter_id
= f
->filter_id
;
1111 ionic_rx_filter_free(lif
, f
);
1113 spin_unlock_bh(&lif
->rx_filters
.lock
);
1115 netdev_dbg(lif
->netdev
, "rx_filter del RXSTEER (id %d)\n", filter_id
);
1117 ctx
.cmd
.rx_filter_del
.filter_id
= cpu_to_le32(filter_id
);
1119 err
= ionic_adminq_post_wait(lif
, &ctx
);
1120 if (err
&& err
!= -EEXIST
)
1121 netdev_dbg(lif
->netdev
, "failed to delete rx_filter RXSTEER (id %d)\n", filter_id
);
1124 static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif
*lif
, u64 pkt_class
)
1126 struct ionic_admin_ctx ctx
= {
1127 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
1128 .cmd
.rx_filter_add
= {
1129 .opcode
= IONIC_CMD_RX_FILTER_ADD
,
1130 .lif_index
= cpu_to_le16(lif
->index
),
1131 .match
= cpu_to_le16(IONIC_RX_FILTER_STEER_PKTCLASS
),
1132 .pkt_class
= cpu_to_le64(pkt_class
),
1139 if (!lif
->hwstamp_rxq
)
1142 qtype
= lif
->hwstamp_rxq
->q
.type
;
1143 ctx
.cmd
.rx_filter_add
.qtype
= qtype
;
1145 qid
= lif
->hwstamp_rxq
->q
.index
;
1146 ctx
.cmd
.rx_filter_add
.qid
= cpu_to_le32(qid
);
1148 netdev_dbg(lif
->netdev
, "rx_filter add RXSTEER\n");
1149 err
= ionic_adminq_post_wait(lif
, &ctx
);
1150 if (err
&& err
!= -EEXIST
)
1153 spin_lock_bh(&lif
->rx_filters
.lock
);
1154 err
= ionic_rx_filter_save(lif
, 0, qid
, 0, &ctx
, IONIC_FILTER_STATE_SYNCED
);
1155 spin_unlock_bh(&lif
->rx_filters
.lock
);
1160 int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif
*lif
, u64 pkt_class
)
1162 ionic_lif_del_hwstamp_rxfilt(lif
);
1167 return ionic_lif_add_hwstamp_rxfilt(lif
, pkt_class
);
1170 static bool ionic_notifyq_service(struct ionic_cq
*cq
,
1171 struct ionic_cq_info
*cq_info
)
1173 union ionic_notifyq_comp
*comp
= cq_info
->cq_desc
;
1174 struct ionic_deferred_work
*work
;
1175 struct net_device
*netdev
;
1176 struct ionic_queue
*q
;
1177 struct ionic_lif
*lif
;
1181 lif
= q
->info
[0].cb_arg
;
1182 netdev
= lif
->netdev
;
1183 eid
= le64_to_cpu(comp
->event
.eid
);
1185 /* Have we run out of new completions to process? */
1186 if ((s64
)(eid
- lif
->last_eid
) <= 0)
1189 lif
->last_eid
= eid
;
1191 dev_dbg(lif
->ionic
->dev
, "notifyq event:\n");
1192 dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET
, 16, 1,
1193 comp
, sizeof(*comp
), true);
1195 switch (le16_to_cpu(comp
->event
.ecode
)) {
1196 case IONIC_EVENT_LINK_CHANGE
:
1197 ionic_link_status_check_request(lif
, CAN_NOT_SLEEP
);
1199 case IONIC_EVENT_RESET
:
1200 if (lif
->ionic
->idev
.fw_status_ready
&&
1201 !test_bit(IONIC_LIF_F_FW_RESET
, lif
->state
) &&
1202 !test_and_set_bit(IONIC_LIF_F_FW_STOPPING
, lif
->state
)) {
1203 work
= kzalloc(sizeof(*work
), GFP_ATOMIC
);
1205 netdev_err(lif
->netdev
, "Reset event dropped\n");
1206 clear_bit(IONIC_LIF_F_FW_STOPPING
, lif
->state
);
1208 work
->type
= IONIC_DW_TYPE_LIF_RESET
;
1209 ionic_lif_deferred_enqueue(&lif
->deferred
, work
);
1214 netdev_warn(netdev
, "Notifyq event ecode=%d eid=%lld\n",
1215 comp
->event
.ecode
, eid
);
1222 static bool ionic_adminq_service(struct ionic_cq
*cq
,
1223 struct ionic_cq_info
*cq_info
)
1225 struct ionic_admin_comp
*comp
= cq_info
->cq_desc
;
1227 if (!color_match(comp
->color
, cq
->done_color
))
1230 ionic_q_service(cq
->bound_q
, cq_info
, le16_to_cpu(comp
->comp_index
));
1235 static int ionic_adminq_napi(struct napi_struct
*napi
, int budget
)
1237 struct ionic_intr_info
*intr
= napi_to_cq(napi
)->bound_intr
;
1238 struct ionic_lif
*lif
= napi_to_cq(napi
)->lif
;
1239 struct ionic_dev
*idev
= &lif
->ionic
->idev
;
1240 unsigned long irqflags
;
1241 unsigned int flags
= 0;
1242 bool resched
= false;
1250 if (lif
->notifyqcq
&& lif
->notifyqcq
->flags
& IONIC_QCQ_F_INITED
)
1251 n_work
= ionic_cq_service(&lif
->notifyqcq
->cq
, budget
,
1252 ionic_notifyq_service
, NULL
, NULL
);
1254 spin_lock_irqsave(&lif
->adminq_lock
, irqflags
);
1255 if (lif
->adminqcq
&& lif
->adminqcq
->flags
& IONIC_QCQ_F_INITED
)
1256 a_work
= ionic_cq_service(&lif
->adminqcq
->cq
, budget
,
1257 ionic_adminq_service
, NULL
, NULL
);
1258 spin_unlock_irqrestore(&lif
->adminq_lock
, irqflags
);
1260 if (lif
->hwstamp_rxq
)
1261 rx_work
= ionic_cq_service(&lif
->hwstamp_rxq
->cq
, budget
,
1262 ionic_rx_service
, NULL
, NULL
);
1264 if (lif
->hwstamp_txq
)
1265 tx_work
= ionic_tx_cq_service(&lif
->hwstamp_txq
->cq
, budget
);
1267 work_done
= max(max(n_work
, a_work
), max(rx_work
, tx_work
));
1268 if (work_done
< budget
&& napi_complete_done(napi
, work_done
)) {
1269 flags
|= IONIC_INTR_CRED_UNMASK
;
1270 intr
->rearm_count
++;
1273 if (work_done
|| flags
) {
1274 flags
|= IONIC_INTR_CRED_RESET_COALESCE
;
1275 credits
= n_work
+ a_work
+ rx_work
+ tx_work
;
1276 ionic_intr_credits(idev
->intr_ctrl
, intr
->index
, credits
, flags
);
1279 if (!a_work
&& ionic_adminq_poke_doorbell(&lif
->adminqcq
->q
))
1281 if (lif
->hwstamp_rxq
&& !rx_work
&& ionic_rxq_poke_doorbell(&lif
->hwstamp_rxq
->q
))
1283 if (lif
->hwstamp_txq
&& !tx_work
&& ionic_txq_poke_doorbell(&lif
->hwstamp_txq
->q
))
1286 mod_timer(&lif
->adminqcq
->napi_deadline
,
1287 jiffies
+ IONIC_NAPI_DEADLINE
);
1292 void ionic_get_stats64(struct net_device
*netdev
,
1293 struct rtnl_link_stats64
*ns
)
1295 struct ionic_lif
*lif
= netdev_priv(netdev
);
1296 struct ionic_lif_stats
*ls
;
1298 memset(ns
, 0, sizeof(*ns
));
1299 ls
= &lif
->info
->stats
;
1301 ns
->rx_packets
= le64_to_cpu(ls
->rx_ucast_packets
) +
1302 le64_to_cpu(ls
->rx_mcast_packets
) +
1303 le64_to_cpu(ls
->rx_bcast_packets
);
1305 ns
->tx_packets
= le64_to_cpu(ls
->tx_ucast_packets
) +
1306 le64_to_cpu(ls
->tx_mcast_packets
) +
1307 le64_to_cpu(ls
->tx_bcast_packets
);
1309 ns
->rx_bytes
= le64_to_cpu(ls
->rx_ucast_bytes
) +
1310 le64_to_cpu(ls
->rx_mcast_bytes
) +
1311 le64_to_cpu(ls
->rx_bcast_bytes
);
1313 ns
->tx_bytes
= le64_to_cpu(ls
->tx_ucast_bytes
) +
1314 le64_to_cpu(ls
->tx_mcast_bytes
) +
1315 le64_to_cpu(ls
->tx_bcast_bytes
);
1317 ns
->rx_dropped
= le64_to_cpu(ls
->rx_ucast_drop_packets
) +
1318 le64_to_cpu(ls
->rx_mcast_drop_packets
) +
1319 le64_to_cpu(ls
->rx_bcast_drop_packets
);
1321 ns
->tx_dropped
= le64_to_cpu(ls
->tx_ucast_drop_packets
) +
1322 le64_to_cpu(ls
->tx_mcast_drop_packets
) +
1323 le64_to_cpu(ls
->tx_bcast_drop_packets
);
1325 ns
->multicast
= le64_to_cpu(ls
->rx_mcast_packets
);
1327 ns
->rx_over_errors
= le64_to_cpu(ls
->rx_queue_empty
);
1329 ns
->rx_missed_errors
= le64_to_cpu(ls
->rx_dma_error
) +
1330 le64_to_cpu(ls
->rx_queue_disabled
) +
1331 le64_to_cpu(ls
->rx_desc_fetch_error
) +
1332 le64_to_cpu(ls
->rx_desc_data_error
);
1334 ns
->tx_aborted_errors
= le64_to_cpu(ls
->tx_dma_error
) +
1335 le64_to_cpu(ls
->tx_queue_disabled
) +
1336 le64_to_cpu(ls
->tx_desc_fetch_error
) +
1337 le64_to_cpu(ls
->tx_desc_data_error
);
1339 ns
->rx_errors
= ns
->rx_over_errors
+
1340 ns
->rx_missed_errors
;
1342 ns
->tx_errors
= ns
->tx_aborted_errors
;
1345 static int ionic_addr_add(struct net_device
*netdev
, const u8
*addr
)
1347 return ionic_lif_list_addr(netdev_priv(netdev
), addr
, ADD_ADDR
);
1350 static int ionic_addr_del(struct net_device
*netdev
, const u8
*addr
)
1352 /* Don't delete our own address from the uc list */
1353 if (ether_addr_equal(addr
, netdev
->dev_addr
))
1356 return ionic_lif_list_addr(netdev_priv(netdev
), addr
, DEL_ADDR
);
1359 void ionic_lif_rx_mode(struct ionic_lif
*lif
)
1361 struct net_device
*netdev
= lif
->netdev
;
1362 unsigned int nfilters
;
1363 unsigned int nd_flags
;
1367 #define REMAIN(__x) (sizeof(buf) - (__x))
1369 mutex_lock(&lif
->config_lock
);
1371 /* grab the flags once for local use */
1372 nd_flags
= netdev
->flags
;
1374 rx_mode
= IONIC_RX_MODE_F_UNICAST
;
1375 rx_mode
|= (nd_flags
& IFF_MULTICAST
) ? IONIC_RX_MODE_F_MULTICAST
: 0;
1376 rx_mode
|= (nd_flags
& IFF_BROADCAST
) ? IONIC_RX_MODE_F_BROADCAST
: 0;
1377 rx_mode
|= (nd_flags
& IFF_PROMISC
) ? IONIC_RX_MODE_F_PROMISC
: 0;
1378 rx_mode
|= (nd_flags
& IFF_ALLMULTI
) ? IONIC_RX_MODE_F_ALLMULTI
: 0;
1380 /* sync the filters */
1381 ionic_rx_filter_sync(lif
);
1383 /* check for overflow state
1384 * if so, we track that we overflowed and enable NIC PROMISC
1385 * else if the overflow is set and not needed
1386 * we remove our overflow flag and check the netdev flags
1387 * to see if we can disable NIC PROMISC
1389 nfilters
= le32_to_cpu(lif
->identity
->eth
.max_ucast_filters
);
1391 if (((lif
->nucast
+ lif
->nmcast
) >= nfilters
) ||
1392 (lif
->max_vlans
&& lif
->nvlans
>= lif
->max_vlans
)) {
1393 rx_mode
|= IONIC_RX_MODE_F_PROMISC
;
1394 rx_mode
|= IONIC_RX_MODE_F_ALLMULTI
;
1396 if (!(nd_flags
& IFF_PROMISC
))
1397 rx_mode
&= ~IONIC_RX_MODE_F_PROMISC
;
1398 if (!(nd_flags
& IFF_ALLMULTI
))
1399 rx_mode
&= ~IONIC_RX_MODE_F_ALLMULTI
;
1402 i
= scnprintf(buf
, sizeof(buf
), "rx_mode 0x%04x -> 0x%04x:",
1403 lif
->rx_mode
, rx_mode
);
1404 if (rx_mode
& IONIC_RX_MODE_F_UNICAST
)
1405 i
+= scnprintf(&buf
[i
], REMAIN(i
), " RX_MODE_F_UNICAST");
1406 if (rx_mode
& IONIC_RX_MODE_F_MULTICAST
)
1407 i
+= scnprintf(&buf
[i
], REMAIN(i
), " RX_MODE_F_MULTICAST");
1408 if (rx_mode
& IONIC_RX_MODE_F_BROADCAST
)
1409 i
+= scnprintf(&buf
[i
], REMAIN(i
), " RX_MODE_F_BROADCAST");
1410 if (rx_mode
& IONIC_RX_MODE_F_PROMISC
)
1411 i
+= scnprintf(&buf
[i
], REMAIN(i
), " RX_MODE_F_PROMISC");
1412 if (rx_mode
& IONIC_RX_MODE_F_ALLMULTI
)
1413 i
+= scnprintf(&buf
[i
], REMAIN(i
), " RX_MODE_F_ALLMULTI");
1414 if (rx_mode
& IONIC_RX_MODE_F_RDMA_SNIFFER
)
1415 i
+= scnprintf(&buf
[i
], REMAIN(i
), " RX_MODE_F_RDMA_SNIFFER");
1416 netdev_dbg(netdev
, "lif%d %s\n", lif
->index
, buf
);
1418 if (lif
->rx_mode
!= rx_mode
) {
1419 struct ionic_admin_ctx ctx
= {
1420 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
1421 .cmd
.rx_mode_set
= {
1422 .opcode
= IONIC_CMD_RX_MODE_SET
,
1423 .lif_index
= cpu_to_le16(lif
->index
),
1428 ctx
.cmd
.rx_mode_set
.rx_mode
= cpu_to_le16(rx_mode
);
1429 err
= ionic_adminq_post_wait(lif
, &ctx
);
1431 netdev_warn(netdev
, "set rx_mode 0x%04x failed: %d\n",
1434 lif
->rx_mode
= rx_mode
;
1437 mutex_unlock(&lif
->config_lock
);
1440 static void ionic_ndo_set_rx_mode(struct net_device
*netdev
)
1442 struct ionic_lif
*lif
= netdev_priv(netdev
);
1443 struct ionic_deferred_work
*work
;
1445 /* Sync the kernel filter list with the driver filter list */
1446 __dev_uc_sync(netdev
, ionic_addr_add
, ionic_addr_del
);
1447 __dev_mc_sync(netdev
, ionic_addr_add
, ionic_addr_del
);
1449 /* Shove off the rest of the rxmode work to the work task
1450 * which will include syncing the filters to the firmware.
1452 work
= kzalloc(sizeof(*work
), GFP_ATOMIC
);
1454 netdev_err(lif
->netdev
, "rxmode change dropped\n");
1457 work
->type
= IONIC_DW_TYPE_RX_MODE
;
1458 netdev_dbg(lif
->netdev
, "deferred: rx_mode\n");
1459 ionic_lif_deferred_enqueue(&lif
->deferred
, work
);
1462 static __le64
ionic_netdev_features_to_nic(netdev_features_t features
)
1466 if (features
& NETIF_F_HW_VLAN_CTAG_TX
)
1467 wanted
|= IONIC_ETH_HW_VLAN_TX_TAG
;
1468 if (features
& NETIF_F_HW_VLAN_CTAG_RX
)
1469 wanted
|= IONIC_ETH_HW_VLAN_RX_STRIP
;
1470 if (features
& NETIF_F_HW_VLAN_CTAG_FILTER
)
1471 wanted
|= IONIC_ETH_HW_VLAN_RX_FILTER
;
1472 if (features
& NETIF_F_RXHASH
)
1473 wanted
|= IONIC_ETH_HW_RX_HASH
;
1474 if (features
& NETIF_F_RXCSUM
)
1475 wanted
|= IONIC_ETH_HW_RX_CSUM
;
1476 if (features
& NETIF_F_SG
)
1477 wanted
|= IONIC_ETH_HW_TX_SG
;
1478 if (features
& NETIF_F_HW_CSUM
)
1479 wanted
|= IONIC_ETH_HW_TX_CSUM
;
1480 if (features
& NETIF_F_TSO
)
1481 wanted
|= IONIC_ETH_HW_TSO
;
1482 if (features
& NETIF_F_TSO6
)
1483 wanted
|= IONIC_ETH_HW_TSO_IPV6
;
1484 if (features
& NETIF_F_TSO_ECN
)
1485 wanted
|= IONIC_ETH_HW_TSO_ECN
;
1486 if (features
& NETIF_F_GSO_GRE
)
1487 wanted
|= IONIC_ETH_HW_TSO_GRE
;
1488 if (features
& NETIF_F_GSO_GRE_CSUM
)
1489 wanted
|= IONIC_ETH_HW_TSO_GRE_CSUM
;
1490 if (features
& NETIF_F_GSO_IPXIP4
)
1491 wanted
|= IONIC_ETH_HW_TSO_IPXIP4
;
1492 if (features
& NETIF_F_GSO_IPXIP6
)
1493 wanted
|= IONIC_ETH_HW_TSO_IPXIP6
;
1494 if (features
& NETIF_F_GSO_UDP_TUNNEL
)
1495 wanted
|= IONIC_ETH_HW_TSO_UDP
;
1496 if (features
& NETIF_F_GSO_UDP_TUNNEL_CSUM
)
1497 wanted
|= IONIC_ETH_HW_TSO_UDP_CSUM
;
1499 return cpu_to_le64(wanted
);
1502 static int ionic_set_nic_features(struct ionic_lif
*lif
,
1503 netdev_features_t features
)
1505 struct device
*dev
= lif
->ionic
->dev
;
1506 struct ionic_admin_ctx ctx
= {
1507 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
1508 .cmd
.lif_setattr
= {
1509 .opcode
= IONIC_CMD_LIF_SETATTR
,
1510 .index
= cpu_to_le16(lif
->index
),
1511 .attr
= IONIC_LIF_ATTR_FEATURES
,
1514 u64 vlan_flags
= IONIC_ETH_HW_VLAN_TX_TAG
|
1515 IONIC_ETH_HW_VLAN_RX_STRIP
|
1516 IONIC_ETH_HW_VLAN_RX_FILTER
;
1517 u64 old_hw_features
;
1520 ctx
.cmd
.lif_setattr
.features
= ionic_netdev_features_to_nic(features
);
1523 ctx
.cmd
.lif_setattr
.features
|= cpu_to_le64(IONIC_ETH_HW_TIMESTAMP
);
1525 err
= ionic_adminq_post_wait(lif
, &ctx
);
1529 old_hw_features
= lif
->hw_features
;
1530 lif
->hw_features
= le64_to_cpu(ctx
.cmd
.lif_setattr
.features
&
1531 ctx
.comp
.lif_setattr
.features
);
1533 if ((old_hw_features
^ lif
->hw_features
) & IONIC_ETH_HW_RX_HASH
)
1534 ionic_lif_rss_config(lif
, lif
->rss_types
, NULL
, NULL
);
1536 if ((vlan_flags
& le64_to_cpu(ctx
.cmd
.lif_setattr
.features
)) &&
1537 !(vlan_flags
& le64_to_cpu(ctx
.comp
.lif_setattr
.features
)))
1538 dev_info_once(lif
->ionic
->dev
, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");
1540 if (lif
->hw_features
& IONIC_ETH_HW_VLAN_TX_TAG
)
1541 dev_dbg(dev
, "feature ETH_HW_VLAN_TX_TAG\n");
1542 if (lif
->hw_features
& IONIC_ETH_HW_VLAN_RX_STRIP
)
1543 dev_dbg(dev
, "feature ETH_HW_VLAN_RX_STRIP\n");
1544 if (lif
->hw_features
& IONIC_ETH_HW_VLAN_RX_FILTER
)
1545 dev_dbg(dev
, "feature ETH_HW_VLAN_RX_FILTER\n");
1546 if (lif
->hw_features
& IONIC_ETH_HW_RX_HASH
)
1547 dev_dbg(dev
, "feature ETH_HW_RX_HASH\n");
1548 if (lif
->hw_features
& IONIC_ETH_HW_TX_SG
)
1549 dev_dbg(dev
, "feature ETH_HW_TX_SG\n");
1550 if (lif
->hw_features
& IONIC_ETH_HW_TX_CSUM
)
1551 dev_dbg(dev
, "feature ETH_HW_TX_CSUM\n");
1552 if (lif
->hw_features
& IONIC_ETH_HW_RX_CSUM
)
1553 dev_dbg(dev
, "feature ETH_HW_RX_CSUM\n");
1554 if (lif
->hw_features
& IONIC_ETH_HW_TSO
)
1555 dev_dbg(dev
, "feature ETH_HW_TSO\n");
1556 if (lif
->hw_features
& IONIC_ETH_HW_TSO_IPV6
)
1557 dev_dbg(dev
, "feature ETH_HW_TSO_IPV6\n");
1558 if (lif
->hw_features
& IONIC_ETH_HW_TSO_ECN
)
1559 dev_dbg(dev
, "feature ETH_HW_TSO_ECN\n");
1560 if (lif
->hw_features
& IONIC_ETH_HW_TSO_GRE
)
1561 dev_dbg(dev
, "feature ETH_HW_TSO_GRE\n");
1562 if (lif
->hw_features
& IONIC_ETH_HW_TSO_GRE_CSUM
)
1563 dev_dbg(dev
, "feature ETH_HW_TSO_GRE_CSUM\n");
1564 if (lif
->hw_features
& IONIC_ETH_HW_TSO_IPXIP4
)
1565 dev_dbg(dev
, "feature ETH_HW_TSO_IPXIP4\n");
1566 if (lif
->hw_features
& IONIC_ETH_HW_TSO_IPXIP6
)
1567 dev_dbg(dev
, "feature ETH_HW_TSO_IPXIP6\n");
1568 if (lif
->hw_features
& IONIC_ETH_HW_TSO_UDP
)
1569 dev_dbg(dev
, "feature ETH_HW_TSO_UDP\n");
1570 if (lif
->hw_features
& IONIC_ETH_HW_TSO_UDP_CSUM
)
1571 dev_dbg(dev
, "feature ETH_HW_TSO_UDP_CSUM\n");
1572 if (lif
->hw_features
& IONIC_ETH_HW_TIMESTAMP
)
1573 dev_dbg(dev
, "feature ETH_HW_TIMESTAMP\n");
1578 static int ionic_init_nic_features(struct ionic_lif
*lif
)
1580 struct net_device
*netdev
= lif
->netdev
;
1581 netdev_features_t features
;
1584 /* set up what we expect to support by default */
1585 features
= NETIF_F_HW_VLAN_CTAG_TX
|
1586 NETIF_F_HW_VLAN_CTAG_RX
|
1587 NETIF_F_HW_VLAN_CTAG_FILTER
|
1595 NETIF_F_GSO_GRE_CSUM
|
1596 NETIF_F_GSO_IPXIP4
|
1597 NETIF_F_GSO_IPXIP6
|
1598 NETIF_F_GSO_UDP_TUNNEL
|
1599 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1602 features
|= NETIF_F_RXHASH
;
1604 err
= ionic_set_nic_features(lif
, features
);
1608 /* tell the netdev what we actually can support */
1609 netdev
->features
|= NETIF_F_HIGHDMA
;
1611 if (lif
->hw_features
& IONIC_ETH_HW_VLAN_TX_TAG
)
1612 netdev
->hw_features
|= NETIF_F_HW_VLAN_CTAG_TX
;
1613 if (lif
->hw_features
& IONIC_ETH_HW_VLAN_RX_STRIP
)
1614 netdev
->hw_features
|= NETIF_F_HW_VLAN_CTAG_RX
;
1615 if (lif
->hw_features
& IONIC_ETH_HW_VLAN_RX_FILTER
)
1616 netdev
->hw_features
|= NETIF_F_HW_VLAN_CTAG_FILTER
;
1617 if (lif
->hw_features
& IONIC_ETH_HW_RX_HASH
)
1618 netdev
->hw_features
|= NETIF_F_RXHASH
;
1619 if (lif
->hw_features
& IONIC_ETH_HW_TX_SG
)
1620 netdev
->hw_features
|= NETIF_F_SG
;
1622 if (lif
->hw_features
& IONIC_ETH_HW_TX_CSUM
)
1623 netdev
->hw_enc_features
|= NETIF_F_HW_CSUM
;
1624 if (lif
->hw_features
& IONIC_ETH_HW_RX_CSUM
)
1625 netdev
->hw_enc_features
|= NETIF_F_RXCSUM
;
1626 if (lif
->hw_features
& IONIC_ETH_HW_TSO
)
1627 netdev
->hw_enc_features
|= NETIF_F_TSO
;
1628 if (lif
->hw_features
& IONIC_ETH_HW_TSO_IPV6
)
1629 netdev
->hw_enc_features
|= NETIF_F_TSO6
;
1630 if (lif
->hw_features
& IONIC_ETH_HW_TSO_ECN
)
1631 netdev
->hw_enc_features
|= NETIF_F_TSO_ECN
;
1632 if (lif
->hw_features
& IONIC_ETH_HW_TSO_GRE
)
1633 netdev
->hw_enc_features
|= NETIF_F_GSO_GRE
;
1634 if (lif
->hw_features
& IONIC_ETH_HW_TSO_GRE_CSUM
)
1635 netdev
->hw_enc_features
|= NETIF_F_GSO_GRE_CSUM
;
1636 if (lif
->hw_features
& IONIC_ETH_HW_TSO_IPXIP4
)
1637 netdev
->hw_enc_features
|= NETIF_F_GSO_IPXIP4
;
1638 if (lif
->hw_features
& IONIC_ETH_HW_TSO_IPXIP6
)
1639 netdev
->hw_enc_features
|= NETIF_F_GSO_IPXIP6
;
1640 if (lif
->hw_features
& IONIC_ETH_HW_TSO_UDP
)
1641 netdev
->hw_enc_features
|= NETIF_F_GSO_UDP_TUNNEL
;
1642 if (lif
->hw_features
& IONIC_ETH_HW_TSO_UDP_CSUM
)
1643 netdev
->hw_enc_features
|= NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1645 netdev
->hw_features
|= netdev
->hw_enc_features
;
1646 netdev
->features
|= netdev
->hw_features
;
1647 netdev
->vlan_features
|= netdev
->features
& ~NETIF_F_VLAN_FEATURES
;
1649 netdev
->priv_flags
|= IFF_UNICAST_FLT
|
1650 IFF_LIVE_ADDR_CHANGE
;
1652 netdev
->xdp_features
= NETDEV_XDP_ACT_BASIC
|
1653 NETDEV_XDP_ACT_REDIRECT
|
1654 NETDEV_XDP_ACT_RX_SG
|
1655 NETDEV_XDP_ACT_NDO_XMIT
|
1656 NETDEV_XDP_ACT_NDO_XMIT_SG
;
1661 static int ionic_set_features(struct net_device
*netdev
,
1662 netdev_features_t features
)
1664 struct ionic_lif
*lif
= netdev_priv(netdev
);
1667 netdev_dbg(netdev
, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
1668 __func__
, (u64
)lif
->netdev
->features
, (u64
)features
);
1670 err
= ionic_set_nic_features(lif
, features
);
1675 static int ionic_set_attr_mac(struct ionic_lif
*lif
, u8
*mac
)
1677 struct ionic_admin_ctx ctx
= {
1678 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
1679 .cmd
.lif_setattr
= {
1680 .opcode
= IONIC_CMD_LIF_SETATTR
,
1681 .index
= cpu_to_le16(lif
->index
),
1682 .attr
= IONIC_LIF_ATTR_MAC
,
1686 ether_addr_copy(ctx
.cmd
.lif_setattr
.mac
, mac
);
1687 return ionic_adminq_post_wait(lif
, &ctx
);
1690 static int ionic_get_attr_mac(struct ionic_lif
*lif
, u8
*mac_addr
)
1692 struct ionic_admin_ctx ctx
= {
1693 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
1694 .cmd
.lif_getattr
= {
1695 .opcode
= IONIC_CMD_LIF_GETATTR
,
1696 .index
= cpu_to_le16(lif
->index
),
1697 .attr
= IONIC_LIF_ATTR_MAC
,
1702 err
= ionic_adminq_post_wait(lif
, &ctx
);
1706 ether_addr_copy(mac_addr
, ctx
.comp
.lif_getattr
.mac
);
1710 static int ionic_program_mac(struct ionic_lif
*lif
, u8
*mac
)
1712 u8 get_mac
[ETH_ALEN
];
1715 err
= ionic_set_attr_mac(lif
, mac
);
1719 err
= ionic_get_attr_mac(lif
, get_mac
);
1723 /* To deal with older firmware that silently ignores the set attr mac:
1724 * doesn't actually change the mac and doesn't return an error, so we
1725 * do the get attr to verify whether or not the set actually happened
1727 if (!ether_addr_equal(get_mac
, mac
))
1733 static int ionic_set_mac_address(struct net_device
*netdev
, void *sa
)
1735 struct ionic_lif
*lif
= netdev_priv(netdev
);
1736 struct sockaddr
*addr
= sa
;
1740 mac
= (u8
*)addr
->sa_data
;
1741 if (ether_addr_equal(netdev
->dev_addr
, mac
))
1744 err
= ionic_program_mac(lif
, mac
);
1749 netdev_dbg(netdev
, "%s: SET and GET ATTR Mac are not equal-due to old FW running\n",
1752 err
= eth_prepare_mac_addr_change(netdev
, addr
);
1756 if (!is_zero_ether_addr(netdev
->dev_addr
)) {
1757 netdev_info(netdev
, "deleting mac addr %pM\n",
1759 ionic_lif_addr_del(netdev_priv(netdev
), netdev
->dev_addr
);
1762 eth_commit_mac_addr_change(netdev
, addr
);
1763 netdev_info(netdev
, "updating mac addr %pM\n", mac
);
1765 return ionic_lif_addr_add(netdev_priv(netdev
), mac
);
1768 void ionic_stop_queues_reconfig(struct ionic_lif
*lif
)
1770 /* Stop and clean the queues before reconfiguration */
1771 netif_device_detach(lif
->netdev
);
1772 ionic_stop_queues(lif
);
1773 ionic_txrx_deinit(lif
);
1776 static int ionic_start_queues_reconfig(struct ionic_lif
*lif
)
1780 /* Re-init the queues after reconfiguration */
1782 /* The only way txrx_init can fail here is if communication
1783 * with FW is suddenly broken. There's not much we can do
1784 * at this point - error messages have already been printed,
1785 * so we can continue on and the user can eventually do a
1786 * DOWN and UP to try to reset and clear the issue.
1788 err
= ionic_txrx_init(lif
);
1789 ionic_link_status_check_request(lif
, CAN_NOT_SLEEP
);
1790 netif_device_attach(lif
->netdev
);
1795 static bool ionic_xdp_is_valid_mtu(struct ionic_lif
*lif
, u32 mtu
,
1796 struct bpf_prog
*xdp_prog
)
1801 if (mtu
<= IONIC_XDP_MAX_LINEAR_MTU
)
1804 if (xdp_prog
->aux
&& xdp_prog
->aux
->xdp_has_frags
)
1810 static int ionic_change_mtu(struct net_device
*netdev
, int new_mtu
)
1812 struct ionic_lif
*lif
= netdev_priv(netdev
);
1813 struct ionic_admin_ctx ctx
= {
1814 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
1815 .cmd
.lif_setattr
= {
1816 .opcode
= IONIC_CMD_LIF_SETATTR
,
1817 .index
= cpu_to_le16(lif
->index
),
1818 .attr
= IONIC_LIF_ATTR_MTU
,
1819 .mtu
= cpu_to_le32(new_mtu
),
1822 struct bpf_prog
*xdp_prog
;
1825 xdp_prog
= READ_ONCE(lif
->xdp_prog
);
1826 if (!ionic_xdp_is_valid_mtu(lif
, new_mtu
, xdp_prog
))
1829 err
= ionic_adminq_post_wait(lif
, &ctx
);
1833 /* if we're not running, nothing more to do */
1834 if (!netif_running(netdev
)) {
1835 netdev
->mtu
= new_mtu
;
1839 mutex_lock(&lif
->queue_lock
);
1840 ionic_stop_queues_reconfig(lif
);
1841 netdev
->mtu
= new_mtu
;
1842 err
= ionic_start_queues_reconfig(lif
);
1843 mutex_unlock(&lif
->queue_lock
);
1848 static void ionic_tx_timeout_work(struct work_struct
*ws
)
1850 struct ionic_lif
*lif
= container_of(ws
, struct ionic_lif
, tx_timeout_work
);
1853 if (test_bit(IONIC_LIF_F_FW_RESET
, lif
->state
))
1856 /* if we were stopped before this scheduled job was launched,
1857 * don't bother the queues as they are already stopped.
1859 if (!netif_running(lif
->netdev
))
1862 mutex_lock(&lif
->queue_lock
);
1863 ionic_stop_queues_reconfig(lif
);
1864 err
= ionic_start_queues_reconfig(lif
);
1865 mutex_unlock(&lif
->queue_lock
);
1868 dev_err(lif
->ionic
->dev
, "%s: Restarting queues failed\n", __func__
);
1871 static void ionic_tx_timeout(struct net_device
*netdev
, unsigned int txqueue
)
1873 struct ionic_lif
*lif
= netdev_priv(netdev
);
1875 netdev_info(lif
->netdev
, "Tx Timeout triggered - txq %d\n", txqueue
);
1876 schedule_work(&lif
->tx_timeout_work
);
1879 static int ionic_vlan_rx_add_vid(struct net_device
*netdev
, __be16 proto
,
1882 struct ionic_lif
*lif
= netdev_priv(netdev
);
1885 err
= ionic_lif_vlan_add(lif
, vid
);
1889 ionic_lif_rx_mode(lif
);
1894 static int ionic_vlan_rx_kill_vid(struct net_device
*netdev
, __be16 proto
,
1897 struct ionic_lif
*lif
= netdev_priv(netdev
);
1900 err
= ionic_lif_vlan_del(lif
, vid
);
1904 ionic_lif_rx_mode(lif
);
1909 int ionic_lif_rss_config(struct ionic_lif
*lif
, const u16 types
,
1910 const u8
*key
, const u32
*indir
)
1912 struct ionic_admin_ctx ctx
= {
1913 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
1914 .cmd
.lif_setattr
= {
1915 .opcode
= IONIC_CMD_LIF_SETATTR
,
1916 .attr
= IONIC_LIF_ATTR_RSS
,
1917 .rss
.addr
= cpu_to_le64(lif
->rss_ind_tbl_pa
),
1920 unsigned int i
, tbl_sz
;
1922 if (lif
->hw_features
& IONIC_ETH_HW_RX_HASH
) {
1923 lif
->rss_types
= types
;
1924 ctx
.cmd
.lif_setattr
.rss
.types
= cpu_to_le16(types
);
1928 memcpy(lif
->rss_hash_key
, key
, IONIC_RSS_HASH_KEY_SIZE
);
1931 tbl_sz
= le16_to_cpu(lif
->ionic
->ident
.lif
.eth
.rss_ind_tbl_sz
);
1932 for (i
= 0; i
< tbl_sz
; i
++)
1933 lif
->rss_ind_tbl
[i
] = indir
[i
];
1936 memcpy(ctx
.cmd
.lif_setattr
.rss
.key
, lif
->rss_hash_key
,
1937 IONIC_RSS_HASH_KEY_SIZE
);
1939 return ionic_adminq_post_wait(lif
, &ctx
);
1942 static int ionic_lif_rss_init(struct ionic_lif
*lif
)
1944 unsigned int tbl_sz
;
1947 lif
->rss_types
= IONIC_RSS_TYPE_IPV4
|
1948 IONIC_RSS_TYPE_IPV4_TCP
|
1949 IONIC_RSS_TYPE_IPV4_UDP
|
1950 IONIC_RSS_TYPE_IPV6
|
1951 IONIC_RSS_TYPE_IPV6_TCP
|
1952 IONIC_RSS_TYPE_IPV6_UDP
;
1954 /* Fill indirection table with 'default' values */
1955 tbl_sz
= le16_to_cpu(lif
->ionic
->ident
.lif
.eth
.rss_ind_tbl_sz
);
1956 for (i
= 0; i
< tbl_sz
; i
++)
1957 lif
->rss_ind_tbl
[i
] = ethtool_rxfh_indir_default(i
, lif
->nxqs
);
1959 return ionic_lif_rss_config(lif
, lif
->rss_types
, NULL
, NULL
);
1962 static void ionic_lif_rss_deinit(struct ionic_lif
*lif
)
1966 tbl_sz
= le16_to_cpu(lif
->ionic
->ident
.lif
.eth
.rss_ind_tbl_sz
);
1967 memset(lif
->rss_ind_tbl
, 0, tbl_sz
);
1968 memset(lif
->rss_hash_key
, 0, IONIC_RSS_HASH_KEY_SIZE
);
1970 ionic_lif_rss_config(lif
, 0x0, NULL
, NULL
);
1973 static void ionic_lif_quiesce(struct ionic_lif
*lif
)
1975 struct ionic_admin_ctx ctx
= {
1976 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
1977 .cmd
.lif_setattr
= {
1978 .opcode
= IONIC_CMD_LIF_SETATTR
,
1979 .index
= cpu_to_le16(lif
->index
),
1980 .attr
= IONIC_LIF_ATTR_STATE
,
1981 .state
= IONIC_LIF_QUIESCE
,
1986 err
= ionic_adminq_post_wait(lif
, &ctx
);
1988 netdev_dbg(lif
->netdev
, "lif quiesce failed %d\n", err
);
1991 static void ionic_txrx_disable(struct ionic_lif
*lif
)
1997 for (i
= 0; i
< lif
->nxqs
; i
++)
1998 err
= ionic_qcq_disable(lif
, lif
->txqcqs
[i
], err
);
2001 if (lif
->hwstamp_txq
)
2002 err
= ionic_qcq_disable(lif
, lif
->hwstamp_txq
, err
);
2005 for (i
= 0; i
< lif
->nxqs
; i
++)
2006 err
= ionic_qcq_disable(lif
, lif
->rxqcqs
[i
], err
);
2009 if (lif
->hwstamp_rxq
)
2010 err
= ionic_qcq_disable(lif
, lif
->hwstamp_rxq
, err
);
2012 ionic_lif_quiesce(lif
);
2015 static void ionic_txrx_deinit(struct ionic_lif
*lif
)
2020 for (i
= 0; i
< lif
->nxqs
&& lif
->txqcqs
[i
]; i
++) {
2021 ionic_lif_qcq_deinit(lif
, lif
->txqcqs
[i
]);
2022 ionic_tx_flush(&lif
->txqcqs
[i
]->cq
);
2023 ionic_tx_empty(&lif
->txqcqs
[i
]->q
);
2028 for (i
= 0; i
< lif
->nxqs
&& lif
->rxqcqs
[i
]; i
++) {
2029 ionic_lif_qcq_deinit(lif
, lif
->rxqcqs
[i
]);
2030 ionic_rx_empty(&lif
->rxqcqs
[i
]->q
);
2035 if (lif
->hwstamp_txq
) {
2036 ionic_lif_qcq_deinit(lif
, lif
->hwstamp_txq
);
2037 ionic_tx_flush(&lif
->hwstamp_txq
->cq
);
2038 ionic_tx_empty(&lif
->hwstamp_txq
->q
);
2041 if (lif
->hwstamp_rxq
) {
2042 ionic_lif_qcq_deinit(lif
, lif
->hwstamp_rxq
);
2043 ionic_rx_empty(&lif
->hwstamp_rxq
->q
);
2047 void ionic_txrx_free(struct ionic_lif
*lif
)
2052 for (i
= 0; i
< lif
->ionic
->ntxqs_per_lif
&& lif
->txqcqs
[i
]; i
++) {
2053 ionic_qcq_free(lif
, lif
->txqcqs
[i
]);
2054 devm_kfree(lif
->ionic
->dev
, lif
->txqcqs
[i
]);
2055 lif
->txqcqs
[i
] = NULL
;
2060 for (i
= 0; i
< lif
->ionic
->nrxqs_per_lif
&& lif
->rxqcqs
[i
]; i
++) {
2061 ionic_qcq_free(lif
, lif
->rxqcqs
[i
]);
2062 devm_kfree(lif
->ionic
->dev
, lif
->rxqcqs
[i
]);
2063 lif
->rxqcqs
[i
] = NULL
;
2067 if (lif
->hwstamp_txq
) {
2068 ionic_qcq_free(lif
, lif
->hwstamp_txq
);
2069 devm_kfree(lif
->ionic
->dev
, lif
->hwstamp_txq
);
2070 lif
->hwstamp_txq
= NULL
;
2073 if (lif
->hwstamp_rxq
) {
2074 ionic_qcq_free(lif
, lif
->hwstamp_rxq
);
2075 devm_kfree(lif
->ionic
->dev
, lif
->hwstamp_rxq
);
2076 lif
->hwstamp_rxq
= NULL
;
2080 static int ionic_txrx_alloc(struct ionic_lif
*lif
)
2082 unsigned int comp_sz
, desc_sz
, num_desc
, sg_desc_sz
;
2083 unsigned int flags
, i
;
2086 num_desc
= lif
->ntxq_descs
;
2087 desc_sz
= sizeof(struct ionic_txq_desc
);
2088 comp_sz
= sizeof(struct ionic_txq_comp
);
2090 if (lif
->qtype_info
[IONIC_QTYPE_TXQ
].version
>= 1 &&
2091 lif
->qtype_info
[IONIC_QTYPE_TXQ
].sg_desc_sz
==
2092 sizeof(struct ionic_txq_sg_desc_v1
))
2093 sg_desc_sz
= sizeof(struct ionic_txq_sg_desc_v1
);
2095 sg_desc_sz
= sizeof(struct ionic_txq_sg_desc
);
2097 flags
= IONIC_QCQ_F_TX_STATS
| IONIC_QCQ_F_SG
;
2099 if (test_bit(IONIC_LIF_F_CMB_TX_RINGS
, lif
->state
))
2100 flags
|= IONIC_QCQ_F_CMB_RINGS
;
2102 if (test_bit(IONIC_LIF_F_SPLIT_INTR
, lif
->state
))
2103 flags
|= IONIC_QCQ_F_INTR
;
2105 for (i
= 0; i
< lif
->nxqs
; i
++) {
2106 err
= ionic_qcq_alloc(lif
, IONIC_QTYPE_TXQ
, i
, "tx", flags
,
2107 num_desc
, desc_sz
, comp_sz
, sg_desc_sz
,
2108 lif
->kern_pid
, &lif
->txqcqs
[i
]);
2112 if (flags
& IONIC_QCQ_F_INTR
) {
2113 ionic_intr_coal_init(lif
->ionic
->idev
.intr_ctrl
,
2114 lif
->txqcqs
[i
]->intr
.index
,
2115 lif
->tx_coalesce_hw
);
2116 if (test_bit(IONIC_LIF_F_TX_DIM_INTR
, lif
->state
))
2117 lif
->txqcqs
[i
]->intr
.dim_coal_hw
= lif
->tx_coalesce_hw
;
2120 ionic_debugfs_add_qcq(lif
, lif
->txqcqs
[i
]);
2123 flags
= IONIC_QCQ_F_RX_STATS
| IONIC_QCQ_F_SG
| IONIC_QCQ_F_INTR
;
2125 if (test_bit(IONIC_LIF_F_CMB_RX_RINGS
, lif
->state
))
2126 flags
|= IONIC_QCQ_F_CMB_RINGS
;
2128 num_desc
= lif
->nrxq_descs
;
2129 desc_sz
= sizeof(struct ionic_rxq_desc
);
2130 comp_sz
= sizeof(struct ionic_rxq_comp
);
2131 sg_desc_sz
= sizeof(struct ionic_rxq_sg_desc
);
2133 if (lif
->rxq_features
& IONIC_Q_F_2X_CQ_DESC
)
2136 for (i
= 0; i
< lif
->nxqs
; i
++) {
2137 err
= ionic_qcq_alloc(lif
, IONIC_QTYPE_RXQ
, i
, "rx", flags
,
2138 num_desc
, desc_sz
, comp_sz
, sg_desc_sz
,
2139 lif
->kern_pid
, &lif
->rxqcqs
[i
]);
2143 lif
->rxqcqs
[i
]->q
.features
= lif
->rxq_features
;
2145 ionic_intr_coal_init(lif
->ionic
->idev
.intr_ctrl
,
2146 lif
->rxqcqs
[i
]->intr
.index
,
2147 lif
->rx_coalesce_hw
);
2148 if (test_bit(IONIC_LIF_F_RX_DIM_INTR
, lif
->state
))
2149 lif
->rxqcqs
[i
]->intr
.dim_coal_hw
= lif
->rx_coalesce_hw
;
2151 if (!test_bit(IONIC_LIF_F_SPLIT_INTR
, lif
->state
))
2152 ionic_link_qcq_interrupts(lif
->rxqcqs
[i
],
2155 ionic_debugfs_add_qcq(lif
, lif
->rxqcqs
[i
]);
2161 ionic_txrx_free(lif
);
2166 static int ionic_txrx_init(struct ionic_lif
*lif
)
2171 for (i
= 0; i
< lif
->nxqs
; i
++) {
2172 err
= ionic_lif_txq_init(lif
, lif
->txqcqs
[i
]);
2176 err
= ionic_lif_rxq_init(lif
, lif
->rxqcqs
[i
]);
2178 ionic_lif_qcq_deinit(lif
, lif
->txqcqs
[i
]);
2183 if (lif
->netdev
->features
& NETIF_F_RXHASH
)
2184 ionic_lif_rss_init(lif
);
2186 ionic_lif_rx_mode(lif
);
2192 ionic_lif_qcq_deinit(lif
, lif
->txqcqs
[i
]);
2193 ionic_lif_qcq_deinit(lif
, lif
->rxqcqs
[i
]);
2199 static int ionic_txrx_enable(struct ionic_lif
*lif
)
2204 err
= ionic_xdp_queues_config(lif
);
2208 for (i
= 0; i
< lif
->nxqs
; i
++) {
2209 if (!(lif
->rxqcqs
[i
] && lif
->txqcqs
[i
])) {
2210 dev_err(lif
->ionic
->dev
, "%s: bad qcq %d\n", __func__
, i
);
2215 ionic_rx_fill(&lif
->rxqcqs
[i
]->q
);
2216 err
= ionic_qcq_enable(lif
->rxqcqs
[i
]);
2220 err
= ionic_qcq_enable(lif
->txqcqs
[i
]);
2222 derr
= ionic_qcq_disable(lif
, lif
->rxqcqs
[i
], err
);
2227 if (lif
->hwstamp_rxq
) {
2228 ionic_rx_fill(&lif
->hwstamp_rxq
->q
);
2229 err
= ionic_qcq_enable(lif
->hwstamp_rxq
);
2231 goto err_out_hwstamp_rx
;
2234 if (lif
->hwstamp_txq
) {
2235 err
= ionic_qcq_enable(lif
->hwstamp_txq
);
2237 goto err_out_hwstamp_tx
;
2243 if (lif
->hwstamp_rxq
)
2244 derr
= ionic_qcq_disable(lif
, lif
->hwstamp_rxq
, derr
);
2249 derr
= ionic_qcq_disable(lif
, lif
->txqcqs
[i
], derr
);
2250 derr
= ionic_qcq_disable(lif
, lif
->rxqcqs
[i
], derr
);
2253 ionic_xdp_queues_config(lif
);
2258 static int ionic_start_queues(struct ionic_lif
*lif
)
2262 if (test_bit(IONIC_LIF_F_BROKEN
, lif
->state
))
2265 if (test_bit(IONIC_LIF_F_FW_RESET
, lif
->state
))
2268 if (test_and_set_bit(IONIC_LIF_F_UP
, lif
->state
))
2271 err
= ionic_txrx_enable(lif
);
2273 clear_bit(IONIC_LIF_F_UP
, lif
->state
);
2276 netif_tx_wake_all_queues(lif
->netdev
);
2281 static int ionic_open(struct net_device
*netdev
)
2283 struct ionic_lif
*lif
= netdev_priv(netdev
);
2286 /* If recovering from a broken state, clear the bit and we'll try again */
2287 if (test_and_clear_bit(IONIC_LIF_F_BROKEN
, lif
->state
))
2288 netdev_info(netdev
, "clearing broken state\n");
2290 mutex_lock(&lif
->queue_lock
);
2292 err
= ionic_txrx_alloc(lif
);
2296 err
= ionic_txrx_init(lif
);
2300 err
= netif_set_real_num_tx_queues(netdev
, lif
->nxqs
);
2302 goto err_txrx_deinit
;
2304 err
= netif_set_real_num_rx_queues(netdev
, lif
->nxqs
);
2306 goto err_txrx_deinit
;
2308 /* don't start the queues until we have link */
2309 if (netif_carrier_ok(netdev
)) {
2310 err
= ionic_start_queues(lif
);
2312 goto err_txrx_deinit
;
2315 /* If hardware timestamping is enabled, but the queues were freed by
2316 * ionic_stop, those need to be reallocated and initialized, too.
2318 ionic_lif_hwstamp_recreate_queues(lif
);
2320 mutex_unlock(&lif
->queue_lock
);
2325 ionic_txrx_deinit(lif
);
2327 ionic_txrx_free(lif
);
2329 mutex_unlock(&lif
->queue_lock
);
2333 static void ionic_stop_queues(struct ionic_lif
*lif
)
2335 if (!test_and_clear_bit(IONIC_LIF_F_UP
, lif
->state
))
2338 netif_tx_disable(lif
->netdev
);
2339 ionic_txrx_disable(lif
);
2342 static int ionic_stop(struct net_device
*netdev
)
2344 struct ionic_lif
*lif
= netdev_priv(netdev
);
2346 if (test_bit(IONIC_LIF_F_FW_RESET
, lif
->state
))
2349 mutex_lock(&lif
->queue_lock
);
2350 ionic_stop_queues(lif
);
2351 ionic_txrx_deinit(lif
);
2352 ionic_txrx_free(lif
);
2353 mutex_unlock(&lif
->queue_lock
);
2358 static int ionic_eth_ioctl(struct net_device
*netdev
, struct ifreq
*ifr
, int cmd
)
2360 struct ionic_lif
*lif
= netdev_priv(netdev
);
2364 return ionic_lif_hwstamp_set(lif
, ifr
);
2366 return ionic_lif_hwstamp_get(lif
, ifr
);
2372 static int ionic_get_vf_config(struct net_device
*netdev
,
2373 int vf
, struct ifla_vf_info
*ivf
)
2375 struct ionic_lif
*lif
= netdev_priv(netdev
);
2376 struct ionic
*ionic
= lif
->ionic
;
2379 if (!netif_device_present(netdev
))
2382 down_read(&ionic
->vf_op_lock
);
2384 if (vf
>= pci_num_vf(ionic
->pdev
) || !ionic
->vfs
) {
2387 struct ionic_vf
*vfdata
= &ionic
->vfs
[vf
];
2391 ivf
->vlan
= le16_to_cpu(vfdata
->vlanid
);
2392 ivf
->spoofchk
= vfdata
->spoofchk
;
2393 ivf
->linkstate
= vfdata
->linkstate
;
2394 ivf
->max_tx_rate
= le32_to_cpu(vfdata
->maxrate
);
2395 ivf
->trusted
= vfdata
->trusted
;
2396 ether_addr_copy(ivf
->mac
, vfdata
->macaddr
);
2399 up_read(&ionic
->vf_op_lock
);
2403 static int ionic_get_vf_stats(struct net_device
*netdev
, int vf
,
2404 struct ifla_vf_stats
*vf_stats
)
2406 struct ionic_lif
*lif
= netdev_priv(netdev
);
2407 struct ionic
*ionic
= lif
->ionic
;
2408 struct ionic_lif_stats
*vs
;
2411 if (!netif_device_present(netdev
))
2414 down_read(&ionic
->vf_op_lock
);
2416 if (vf
>= pci_num_vf(ionic
->pdev
) || !ionic
->vfs
) {
2419 memset(vf_stats
, 0, sizeof(*vf_stats
));
2420 vs
= &ionic
->vfs
[vf
].stats
;
2422 vf_stats
->rx_packets
= le64_to_cpu(vs
->rx_ucast_packets
);
2423 vf_stats
->tx_packets
= le64_to_cpu(vs
->tx_ucast_packets
);
2424 vf_stats
->rx_bytes
= le64_to_cpu(vs
->rx_ucast_bytes
);
2425 vf_stats
->tx_bytes
= le64_to_cpu(vs
->tx_ucast_bytes
);
2426 vf_stats
->broadcast
= le64_to_cpu(vs
->rx_bcast_packets
);
2427 vf_stats
->multicast
= le64_to_cpu(vs
->rx_mcast_packets
);
2428 vf_stats
->rx_dropped
= le64_to_cpu(vs
->rx_ucast_drop_packets
) +
2429 le64_to_cpu(vs
->rx_mcast_drop_packets
) +
2430 le64_to_cpu(vs
->rx_bcast_drop_packets
);
2431 vf_stats
->tx_dropped
= le64_to_cpu(vs
->tx_ucast_drop_packets
) +
2432 le64_to_cpu(vs
->tx_mcast_drop_packets
) +
2433 le64_to_cpu(vs
->tx_bcast_drop_packets
);
2436 up_read(&ionic
->vf_op_lock
);
2440 static int ionic_set_vf_mac(struct net_device
*netdev
, int vf
, u8
*mac
)
2442 struct ionic_vf_setattr_cmd vfc
= { .attr
= IONIC_VF_ATTR_MAC
};
2443 struct ionic_lif
*lif
= netdev_priv(netdev
);
2444 struct ionic
*ionic
= lif
->ionic
;
2447 if (!(is_zero_ether_addr(mac
) || is_valid_ether_addr(mac
)))
2450 if (!netif_device_present(netdev
))
2453 down_write(&ionic
->vf_op_lock
);
2455 if (vf
>= pci_num_vf(ionic
->pdev
) || !ionic
->vfs
) {
2458 ether_addr_copy(vfc
.macaddr
, mac
);
2459 dev_dbg(ionic
->dev
, "%s: vf %d macaddr %pM\n",
2460 __func__
, vf
, vfc
.macaddr
);
2462 ret
= ionic_set_vf_config(ionic
, vf
, &vfc
);
2464 ether_addr_copy(ionic
->vfs
[vf
].macaddr
, mac
);
2467 up_write(&ionic
->vf_op_lock
);
2471 static int ionic_set_vf_vlan(struct net_device
*netdev
, int vf
, u16 vlan
,
2472 u8 qos
, __be16 proto
)
2474 struct ionic_vf_setattr_cmd vfc
= { .attr
= IONIC_VF_ATTR_VLAN
};
2475 struct ionic_lif
*lif
= netdev_priv(netdev
);
2476 struct ionic
*ionic
= lif
->ionic
;
2479 /* until someday when we support qos */
2486 if (proto
!= htons(ETH_P_8021Q
))
2487 return -EPROTONOSUPPORT
;
2489 if (!netif_device_present(netdev
))
2492 down_write(&ionic
->vf_op_lock
);
2494 if (vf
>= pci_num_vf(ionic
->pdev
) || !ionic
->vfs
) {
2497 vfc
.vlanid
= cpu_to_le16(vlan
);
2498 dev_dbg(ionic
->dev
, "%s: vf %d vlan %d\n",
2499 __func__
, vf
, le16_to_cpu(vfc
.vlanid
));
2501 ret
= ionic_set_vf_config(ionic
, vf
, &vfc
);
2503 ionic
->vfs
[vf
].vlanid
= cpu_to_le16(vlan
);
2506 up_write(&ionic
->vf_op_lock
);
2510 static int ionic_set_vf_rate(struct net_device
*netdev
, int vf
,
2511 int tx_min
, int tx_max
)
2513 struct ionic_vf_setattr_cmd vfc
= { .attr
= IONIC_VF_ATTR_RATE
};
2514 struct ionic_lif
*lif
= netdev_priv(netdev
);
2515 struct ionic
*ionic
= lif
->ionic
;
2518 /* setting the min just seems silly */
2522 if (!netif_device_present(netdev
))
2525 down_write(&ionic
->vf_op_lock
);
2527 if (vf
>= pci_num_vf(ionic
->pdev
) || !ionic
->vfs
) {
2530 vfc
.maxrate
= cpu_to_le32(tx_max
);
2531 dev_dbg(ionic
->dev
, "%s: vf %d maxrate %d\n",
2532 __func__
, vf
, le32_to_cpu(vfc
.maxrate
));
2534 ret
= ionic_set_vf_config(ionic
, vf
, &vfc
);
2536 ionic
->vfs
[vf
].maxrate
= cpu_to_le32(tx_max
);
2539 up_write(&ionic
->vf_op_lock
);
2543 static int ionic_set_vf_spoofchk(struct net_device
*netdev
, int vf
, bool set
)
2545 struct ionic_vf_setattr_cmd vfc
= { .attr
= IONIC_VF_ATTR_SPOOFCHK
};
2546 struct ionic_lif
*lif
= netdev_priv(netdev
);
2547 struct ionic
*ionic
= lif
->ionic
;
2550 if (!netif_device_present(netdev
))
2553 down_write(&ionic
->vf_op_lock
);
2555 if (vf
>= pci_num_vf(ionic
->pdev
) || !ionic
->vfs
) {
2559 dev_dbg(ionic
->dev
, "%s: vf %d spoof %d\n",
2560 __func__
, vf
, vfc
.spoofchk
);
2562 ret
= ionic_set_vf_config(ionic
, vf
, &vfc
);
2564 ionic
->vfs
[vf
].spoofchk
= set
;
2567 up_write(&ionic
->vf_op_lock
);
2571 static int ionic_set_vf_trust(struct net_device
*netdev
, int vf
, bool set
)
2573 struct ionic_vf_setattr_cmd vfc
= { .attr
= IONIC_VF_ATTR_TRUST
};
2574 struct ionic_lif
*lif
= netdev_priv(netdev
);
2575 struct ionic
*ionic
= lif
->ionic
;
2578 if (!netif_device_present(netdev
))
2581 down_write(&ionic
->vf_op_lock
);
2583 if (vf
>= pci_num_vf(ionic
->pdev
) || !ionic
->vfs
) {
2587 dev_dbg(ionic
->dev
, "%s: vf %d trust %d\n",
2588 __func__
, vf
, vfc
.trust
);
2590 ret
= ionic_set_vf_config(ionic
, vf
, &vfc
);
2592 ionic
->vfs
[vf
].trusted
= set
;
2595 up_write(&ionic
->vf_op_lock
);
2599 static int ionic_set_vf_link_state(struct net_device
*netdev
, int vf
, int set
)
2601 struct ionic_vf_setattr_cmd vfc
= { .attr
= IONIC_VF_ATTR_LINKSTATE
};
2602 struct ionic_lif
*lif
= netdev_priv(netdev
);
2603 struct ionic
*ionic
= lif
->ionic
;
2608 case IFLA_VF_LINK_STATE_ENABLE
:
2609 vfls
= IONIC_VF_LINK_STATUS_UP
;
2611 case IFLA_VF_LINK_STATE_DISABLE
:
2612 vfls
= IONIC_VF_LINK_STATUS_DOWN
;
2614 case IFLA_VF_LINK_STATE_AUTO
:
2615 vfls
= IONIC_VF_LINK_STATUS_AUTO
;
2621 if (!netif_device_present(netdev
))
2624 down_write(&ionic
->vf_op_lock
);
2626 if (vf
>= pci_num_vf(ionic
->pdev
) || !ionic
->vfs
) {
2629 vfc
.linkstate
= vfls
;
2630 dev_dbg(ionic
->dev
, "%s: vf %d linkstate %d\n",
2631 __func__
, vf
, vfc
.linkstate
);
2633 ret
= ionic_set_vf_config(ionic
, vf
, &vfc
);
2635 ionic
->vfs
[vf
].linkstate
= set
;
2638 up_write(&ionic
->vf_op_lock
);
2642 static void ionic_vf_attr_replay(struct ionic_lif
*lif
)
2644 struct ionic_vf_setattr_cmd vfc
= { };
2645 struct ionic
*ionic
= lif
->ionic
;
2652 down_read(&ionic
->vf_op_lock
);
2654 for (i
= 0; i
< ionic
->num_vfs
; i
++) {
2658 vfc
.attr
= IONIC_VF_ATTR_STATSADDR
;
2659 vfc
.stats_pa
= cpu_to_le64(v
->stats_pa
);
2660 ionic_set_vf_config(ionic
, i
, &vfc
);
2664 if (!is_zero_ether_addr(v
->macaddr
)) {
2665 vfc
.attr
= IONIC_VF_ATTR_MAC
;
2666 ether_addr_copy(vfc
.macaddr
, v
->macaddr
);
2667 ionic_set_vf_config(ionic
, i
, &vfc
);
2668 eth_zero_addr(vfc
.macaddr
);
2672 vfc
.attr
= IONIC_VF_ATTR_VLAN
;
2673 vfc
.vlanid
= v
->vlanid
;
2674 ionic_set_vf_config(ionic
, i
, &vfc
);
2679 vfc
.attr
= IONIC_VF_ATTR_RATE
;
2680 vfc
.maxrate
= v
->maxrate
;
2681 ionic_set_vf_config(ionic
, i
, &vfc
);
2686 vfc
.attr
= IONIC_VF_ATTR_SPOOFCHK
;
2687 vfc
.spoofchk
= v
->spoofchk
;
2688 ionic_set_vf_config(ionic
, i
, &vfc
);
2693 vfc
.attr
= IONIC_VF_ATTR_TRUST
;
2694 vfc
.trust
= v
->trusted
;
2695 ionic_set_vf_config(ionic
, i
, &vfc
);
2700 vfc
.attr
= IONIC_VF_ATTR_LINKSTATE
;
2701 vfc
.linkstate
= v
->linkstate
;
2702 ionic_set_vf_config(ionic
, i
, &vfc
);
2707 up_read(&ionic
->vf_op_lock
);
2709 ionic_vf_start(ionic
);
2712 static void ionic_xdp_unregister_rxq_info(struct ionic_queue
*q
)
2714 struct xdp_rxq_info
*xi
;
2716 if (!q
->xdp_rxq_info
)
2719 xi
= q
->xdp_rxq_info
;
2720 q
->xdp_rxq_info
= NULL
;
2722 xdp_rxq_info_unreg(xi
);
2726 static int ionic_xdp_register_rxq_info(struct ionic_queue
*q
, unsigned int napi_id
)
2728 struct xdp_rxq_info
*rxq_info
;
2731 rxq_info
= kzalloc(sizeof(*rxq_info
), GFP_KERNEL
);
2735 err
= xdp_rxq_info_reg(rxq_info
, q
->lif
->netdev
, q
->index
, napi_id
);
2737 dev_err(q
->dev
, "Queue %d xdp_rxq_info_reg failed, err %d\n",
2742 err
= xdp_rxq_info_reg_mem_model(rxq_info
, MEM_TYPE_PAGE_ORDER0
, NULL
);
2744 dev_err(q
->dev
, "Queue %d xdp_rxq_info_reg_mem_model failed, err %d\n",
2746 xdp_rxq_info_unreg(rxq_info
);
2750 q
->xdp_rxq_info
= rxq_info
;
2759 static int ionic_xdp_queues_config(struct ionic_lif
*lif
)
2767 /* There's no need to rework memory if not going to/from NULL program.
2768 * If there is no lif->xdp_prog, there should also be no q.xdp_rxq_info
2769 * This way we don't need to keep an *xdp_prog in every queue struct.
2771 if (!lif
->xdp_prog
== !lif
->rxqcqs
[0]->q
.xdp_rxq_info
)
2774 for (i
= 0; i
< lif
->ionic
->nrxqs_per_lif
&& lif
->rxqcqs
[i
]; i
++) {
2775 struct ionic_queue
*q
= &lif
->rxqcqs
[i
]->q
;
2777 if (q
->xdp_rxq_info
) {
2778 ionic_xdp_unregister_rxq_info(q
);
2782 err
= ionic_xdp_register_rxq_info(q
, lif
->rxqcqs
[i
]->napi
.napi_id
);
2784 dev_err(lif
->ionic
->dev
, "failed to register RX queue %d info for XDP, err %d\n",
2793 for (i
= 0; i
< lif
->ionic
->nrxqs_per_lif
&& lif
->rxqcqs
[i
]; i
++)
2794 ionic_xdp_unregister_rxq_info(&lif
->rxqcqs
[i
]->q
);
2799 static int ionic_xdp_config(struct net_device
*netdev
, struct netdev_bpf
*bpf
)
2801 struct ionic_lif
*lif
= netdev_priv(netdev
);
2802 struct bpf_prog
*old_prog
;
2805 if (test_bit(IONIC_LIF_F_SPLIT_INTR
, lif
->state
)) {
2806 #define XDP_ERR_SPLIT "XDP not available with split Tx/Rx interrupts"
2807 NL_SET_ERR_MSG_MOD(bpf
->extack
, XDP_ERR_SPLIT
);
2808 netdev_info(lif
->netdev
, XDP_ERR_SPLIT
);
2812 if (!ionic_xdp_is_valid_mtu(lif
, netdev
->mtu
, bpf
->prog
)) {
2813 #define XDP_ERR_MTU "MTU is too large for XDP without frags support"
2814 NL_SET_ERR_MSG_MOD(bpf
->extack
, XDP_ERR_MTU
);
2815 netdev_info(lif
->netdev
, XDP_ERR_MTU
);
2819 maxfs
= __le32_to_cpu(lif
->identity
->eth
.max_frame_size
) - VLAN_ETH_HLEN
;
2820 if (bpf
->prog
&& !(bpf
->prog
->aux
&& bpf
->prog
->aux
->xdp_has_frags
))
2821 maxfs
= min_t(u32
, maxfs
, IONIC_XDP_MAX_LINEAR_MTU
);
2822 netdev
->max_mtu
= maxfs
;
2824 if (!netif_running(netdev
)) {
2825 old_prog
= xchg(&lif
->xdp_prog
, bpf
->prog
);
2827 mutex_lock(&lif
->queue_lock
);
2828 ionic_stop_queues_reconfig(lif
);
2829 old_prog
= xchg(&lif
->xdp_prog
, bpf
->prog
);
2830 ionic_start_queues_reconfig(lif
);
2831 mutex_unlock(&lif
->queue_lock
);
2835 bpf_prog_put(old_prog
);
2840 static int ionic_xdp(struct net_device
*netdev
, struct netdev_bpf
*bpf
)
2842 switch (bpf
->command
) {
2843 case XDP_SETUP_PROG
:
2844 return ionic_xdp_config(netdev
, bpf
);
2850 static const struct net_device_ops ionic_netdev_ops
= {
2851 .ndo_open
= ionic_open
,
2852 .ndo_stop
= ionic_stop
,
2853 .ndo_eth_ioctl
= ionic_eth_ioctl
,
2854 .ndo_start_xmit
= ionic_start_xmit
,
2855 .ndo_bpf
= ionic_xdp
,
2856 .ndo_xdp_xmit
= ionic_xdp_xmit
,
2857 .ndo_get_stats64
= ionic_get_stats64
,
2858 .ndo_set_rx_mode
= ionic_ndo_set_rx_mode
,
2859 .ndo_set_features
= ionic_set_features
,
2860 .ndo_set_mac_address
= ionic_set_mac_address
,
2861 .ndo_validate_addr
= eth_validate_addr
,
2862 .ndo_tx_timeout
= ionic_tx_timeout
,
2863 .ndo_change_mtu
= ionic_change_mtu
,
2864 .ndo_vlan_rx_add_vid
= ionic_vlan_rx_add_vid
,
2865 .ndo_vlan_rx_kill_vid
= ionic_vlan_rx_kill_vid
,
2866 .ndo_set_vf_vlan
= ionic_set_vf_vlan
,
2867 .ndo_set_vf_trust
= ionic_set_vf_trust
,
2868 .ndo_set_vf_mac
= ionic_set_vf_mac
,
2869 .ndo_set_vf_rate
= ionic_set_vf_rate
,
2870 .ndo_set_vf_spoofchk
= ionic_set_vf_spoofchk
,
2871 .ndo_get_vf_config
= ionic_get_vf_config
,
2872 .ndo_set_vf_link_state
= ionic_set_vf_link_state
,
2873 .ndo_get_vf_stats
= ionic_get_vf_stats
,
2876 static int ionic_cmb_reconfig(struct ionic_lif
*lif
,
2877 struct ionic_queue_params
*qparam
)
2879 struct ionic_queue_params start_qparams
;
2882 /* When changing CMB queue parameters, we're using limited
2883 * on-device memory and don't have extra memory to use for
2884 * duplicate allocations, so we free it all first then
2885 * re-allocate with the new parameters.
2888 /* Checkpoint for possible unwind */
2889 ionic_init_queue_params(lif
, &start_qparams
);
2891 /* Stop and free the queues */
2892 ionic_stop_queues_reconfig(lif
);
2893 ionic_txrx_free(lif
);
2895 /* Set up new qparams */
2896 ionic_set_queue_params(lif
, qparam
);
2898 if (netif_running(lif
->netdev
)) {
2899 /* Alloc and start the new configuration */
2900 err
= ionic_txrx_alloc(lif
);
2902 dev_warn(lif
->ionic
->dev
,
2903 "CMB reconfig failed, restoring values: %d\n", err
);
2905 /* Back out the changes */
2906 ionic_set_queue_params(lif
, &start_qparams
);
2907 err
= ionic_txrx_alloc(lif
);
2909 dev_err(lif
->ionic
->dev
,
2910 "CMB restore failed: %d\n", err
);
2915 err
= ionic_start_queues_reconfig(lif
);
2917 dev_err(lif
->ionic
->dev
,
2918 "CMB reconfig failed: %d\n", err
);
2924 /* This was detached in ionic_stop_queues_reconfig() */
2925 netif_device_attach(lif
->netdev
);
2930 static void ionic_swap_queues(struct ionic_qcq
*a
, struct ionic_qcq
*b
)
2932 /* only swapping the queues, not the napi, flags, or other stuff */
2933 swap(a
->q
.features
, b
->q
.features
);
2934 swap(a
->q
.num_descs
, b
->q
.num_descs
);
2935 swap(a
->q
.desc_size
, b
->q
.desc_size
);
2936 swap(a
->q
.base
, b
->q
.base
);
2937 swap(a
->q
.base_pa
, b
->q
.base_pa
);
2938 swap(a
->q
.info
, b
->q
.info
);
2939 swap(a
->q
.xdp_rxq_info
, b
->q
.xdp_rxq_info
);
2940 swap(a
->q
.partner
, b
->q
.partner
);
2941 swap(a
->q_base
, b
->q_base
);
2942 swap(a
->q_base_pa
, b
->q_base_pa
);
2943 swap(a
->q_size
, b
->q_size
);
2945 swap(a
->q
.sg_desc_size
, b
->q
.sg_desc_size
);
2946 swap(a
->q
.sg_base
, b
->q
.sg_base
);
2947 swap(a
->q
.sg_base_pa
, b
->q
.sg_base_pa
);
2948 swap(a
->sg_base
, b
->sg_base
);
2949 swap(a
->sg_base_pa
, b
->sg_base_pa
);
2950 swap(a
->sg_size
, b
->sg_size
);
2952 swap(a
->cq
.num_descs
, b
->cq
.num_descs
);
2953 swap(a
->cq
.desc_size
, b
->cq
.desc_size
);
2954 swap(a
->cq
.base
, b
->cq
.base
);
2955 swap(a
->cq
.base_pa
, b
->cq
.base_pa
);
2956 swap(a
->cq
.info
, b
->cq
.info
);
2957 swap(a
->cq_base
, b
->cq_base
);
2958 swap(a
->cq_base_pa
, b
->cq_base_pa
);
2959 swap(a
->cq_size
, b
->cq_size
);
2961 ionic_debugfs_del_qcq(a
);
2962 ionic_debugfs_add_qcq(a
->q
.lif
, a
);
2965 int ionic_reconfigure_queues(struct ionic_lif
*lif
,
2966 struct ionic_queue_params
*qparam
)
2968 unsigned int comp_sz
, desc_sz
, num_desc
, sg_desc_sz
;
2969 struct ionic_qcq
**tx_qcqs
= NULL
;
2970 struct ionic_qcq
**rx_qcqs
= NULL
;
2971 unsigned int flags
, i
;
2974 /* Are we changing q params while CMB is on */
2975 if ((test_bit(IONIC_LIF_F_CMB_TX_RINGS
, lif
->state
) && qparam
->cmb_tx
) ||
2976 (test_bit(IONIC_LIF_F_CMB_RX_RINGS
, lif
->state
) && qparam
->cmb_rx
))
2977 return ionic_cmb_reconfig(lif
, qparam
);
2979 /* allocate temporary qcq arrays to hold new queue structs */
2980 if (qparam
->nxqs
!= lif
->nxqs
|| qparam
->ntxq_descs
!= lif
->ntxq_descs
) {
2981 tx_qcqs
= devm_kcalloc(lif
->ionic
->dev
, lif
->ionic
->ntxqs_per_lif
,
2982 sizeof(struct ionic_qcq
*), GFP_KERNEL
);
2988 if (qparam
->nxqs
!= lif
->nxqs
||
2989 qparam
->nrxq_descs
!= lif
->nrxq_descs
||
2990 qparam
->rxq_features
!= lif
->rxq_features
) {
2991 rx_qcqs
= devm_kcalloc(lif
->ionic
->dev
, lif
->ionic
->nrxqs_per_lif
,
2992 sizeof(struct ionic_qcq
*), GFP_KERNEL
);
2999 /* allocate new desc_info and rings, but leave the interrupt setup
3000 * until later so as to not mess with the still-running queues
3003 num_desc
= qparam
->ntxq_descs
;
3004 desc_sz
= sizeof(struct ionic_txq_desc
);
3005 comp_sz
= sizeof(struct ionic_txq_comp
);
3007 if (lif
->qtype_info
[IONIC_QTYPE_TXQ
].version
>= 1 &&
3008 lif
->qtype_info
[IONIC_QTYPE_TXQ
].sg_desc_sz
==
3009 sizeof(struct ionic_txq_sg_desc_v1
))
3010 sg_desc_sz
= sizeof(struct ionic_txq_sg_desc_v1
);
3012 sg_desc_sz
= sizeof(struct ionic_txq_sg_desc
);
3014 for (i
= 0; i
< qparam
->nxqs
; i
++) {
3015 /* If missing, short placeholder qcq needed for swap */
3016 if (!lif
->txqcqs
[i
]) {
3017 flags
= IONIC_QCQ_F_TX_STATS
| IONIC_QCQ_F_SG
;
3018 err
= ionic_qcq_alloc(lif
, IONIC_QTYPE_TXQ
, i
, "tx", flags
,
3019 4, desc_sz
, comp_sz
, sg_desc_sz
,
3020 lif
->kern_pid
, &lif
->txqcqs
[i
]);
3025 flags
= lif
->txqcqs
[i
]->flags
& ~IONIC_QCQ_F_INTR
;
3026 err
= ionic_qcq_alloc(lif
, IONIC_QTYPE_TXQ
, i
, "tx", flags
,
3027 num_desc
, desc_sz
, comp_sz
, sg_desc_sz
,
3028 lif
->kern_pid
, &tx_qcqs
[i
]);
3035 num_desc
= qparam
->nrxq_descs
;
3036 desc_sz
= sizeof(struct ionic_rxq_desc
);
3037 comp_sz
= sizeof(struct ionic_rxq_comp
);
3038 sg_desc_sz
= sizeof(struct ionic_rxq_sg_desc
);
3040 if (qparam
->rxq_features
& IONIC_Q_F_2X_CQ_DESC
)
3043 for (i
= 0; i
< qparam
->nxqs
; i
++) {
3044 /* If missing, short placeholder qcq needed for swap */
3045 if (!lif
->rxqcqs
[i
]) {
3046 flags
= IONIC_QCQ_F_RX_STATS
| IONIC_QCQ_F_SG
;
3047 err
= ionic_qcq_alloc(lif
, IONIC_QTYPE_RXQ
, i
, "rx", flags
,
3048 4, desc_sz
, comp_sz
, sg_desc_sz
,
3049 lif
->kern_pid
, &lif
->rxqcqs
[i
]);
3054 flags
= lif
->rxqcqs
[i
]->flags
& ~IONIC_QCQ_F_INTR
;
3055 err
= ionic_qcq_alloc(lif
, IONIC_QTYPE_RXQ
, i
, "rx", flags
,
3056 num_desc
, desc_sz
, comp_sz
, sg_desc_sz
,
3057 lif
->kern_pid
, &rx_qcqs
[i
]);
3061 rx_qcqs
[i
]->q
.features
= qparam
->rxq_features
;
3065 /* stop and clean the queues */
3066 ionic_stop_queues_reconfig(lif
);
3068 if (qparam
->nxqs
!= lif
->nxqs
) {
3069 err
= netif_set_real_num_tx_queues(lif
->netdev
, qparam
->nxqs
);
3071 goto err_out_reinit_unlock
;
3072 err
= netif_set_real_num_rx_queues(lif
->netdev
, qparam
->nxqs
);
3074 netif_set_real_num_tx_queues(lif
->netdev
, lif
->nxqs
);
3075 goto err_out_reinit_unlock
;
3079 /* swap new desc_info and rings, keeping existing interrupt config */
3081 lif
->ntxq_descs
= qparam
->ntxq_descs
;
3082 for (i
= 0; i
< qparam
->nxqs
; i
++)
3083 ionic_swap_queues(lif
->txqcqs
[i
], tx_qcqs
[i
]);
3087 lif
->nrxq_descs
= qparam
->nrxq_descs
;
3088 for (i
= 0; i
< qparam
->nxqs
; i
++)
3089 ionic_swap_queues(lif
->rxqcqs
[i
], rx_qcqs
[i
]);
3092 /* if we need to change the interrupt layout, this is the time */
3093 if (qparam
->intr_split
!= test_bit(IONIC_LIF_F_SPLIT_INTR
, lif
->state
) ||
3094 qparam
->nxqs
!= lif
->nxqs
) {
3095 if (qparam
->intr_split
) {
3096 set_bit(IONIC_LIF_F_SPLIT_INTR
, lif
->state
);
3098 clear_bit(IONIC_LIF_F_SPLIT_INTR
, lif
->state
);
3099 lif
->tx_coalesce_usecs
= lif
->rx_coalesce_usecs
;
3100 lif
->tx_coalesce_hw
= lif
->rx_coalesce_hw
;
3103 /* Clear existing interrupt assignments. We check for NULL here
3104 * because we're checking the whole array for potential qcqs, not
3105 * just those qcqs that have just been set up.
3107 for (i
= 0; i
< lif
->ionic
->ntxqs_per_lif
; i
++) {
3109 ionic_qcq_intr_free(lif
, lif
->txqcqs
[i
]);
3111 ionic_qcq_intr_free(lif
, lif
->rxqcqs
[i
]);
3114 /* re-assign the interrupts */
3115 for (i
= 0; i
< qparam
->nxqs
; i
++) {
3116 lif
->rxqcqs
[i
]->flags
|= IONIC_QCQ_F_INTR
;
3117 err
= ionic_alloc_qcq_interrupt(lif
, lif
->rxqcqs
[i
]);
3118 ionic_intr_coal_init(lif
->ionic
->idev
.intr_ctrl
,
3119 lif
->rxqcqs
[i
]->intr
.index
,
3120 lif
->rx_coalesce_hw
);
3122 if (qparam
->intr_split
) {
3123 lif
->txqcqs
[i
]->flags
|= IONIC_QCQ_F_INTR
;
3124 err
= ionic_alloc_qcq_interrupt(lif
, lif
->txqcqs
[i
]);
3125 ionic_intr_coal_init(lif
->ionic
->idev
.intr_ctrl
,
3126 lif
->txqcqs
[i
]->intr
.index
,
3127 lif
->tx_coalesce_hw
);
3128 if (test_bit(IONIC_LIF_F_TX_DIM_INTR
, lif
->state
))
3129 lif
->txqcqs
[i
]->intr
.dim_coal_hw
= lif
->tx_coalesce_hw
;
3131 lif
->txqcqs
[i
]->flags
&= ~IONIC_QCQ_F_INTR
;
3132 ionic_link_qcq_interrupts(lif
->rxqcqs
[i
], lif
->txqcqs
[i
]);
3137 /* now we can rework the debugfs mappings */
3139 for (i
= 0; i
< qparam
->nxqs
; i
++) {
3140 ionic_debugfs_del_qcq(lif
->txqcqs
[i
]);
3141 ionic_debugfs_add_qcq(lif
, lif
->txqcqs
[i
]);
3146 for (i
= 0; i
< qparam
->nxqs
; i
++) {
3147 ionic_debugfs_del_qcq(lif
->rxqcqs
[i
]);
3148 ionic_debugfs_add_qcq(lif
, lif
->rxqcqs
[i
]);
3152 swap(lif
->nxqs
, qparam
->nxqs
);
3153 swap(lif
->rxq_features
, qparam
->rxq_features
);
3155 err_out_reinit_unlock
:
3156 /* re-init the queues, but don't lose an error code */
3158 ionic_start_queues_reconfig(lif
);
3160 err
= ionic_start_queues_reconfig(lif
);
3163 /* free old allocs without cleaning intr */
3164 for (i
= 0; i
< qparam
->nxqs
; i
++) {
3165 if (tx_qcqs
&& tx_qcqs
[i
]) {
3166 tx_qcqs
[i
]->flags
&= ~IONIC_QCQ_F_INTR
;
3167 ionic_qcq_free(lif
, tx_qcqs
[i
]);
3168 devm_kfree(lif
->ionic
->dev
, tx_qcqs
[i
]);
3171 if (rx_qcqs
&& rx_qcqs
[i
]) {
3172 rx_qcqs
[i
]->flags
&= ~IONIC_QCQ_F_INTR
;
3173 ionic_qcq_free(lif
, rx_qcqs
[i
]);
3174 devm_kfree(lif
->ionic
->dev
, rx_qcqs
[i
]);
3181 devm_kfree(lif
->ionic
->dev
, rx_qcqs
);
3185 devm_kfree(lif
->ionic
->dev
, tx_qcqs
);
3189 /* clean the unused dma and info allocations when new set is smaller
3190 * than the full array, but leave the qcq shells in place
3192 for (i
= lif
->nxqs
; i
< lif
->ionic
->ntxqs_per_lif
; i
++) {
3193 if (lif
->txqcqs
&& lif
->txqcqs
[i
]) {
3194 lif
->txqcqs
[i
]->flags
&= ~IONIC_QCQ_F_INTR
;
3195 ionic_qcq_free(lif
, lif
->txqcqs
[i
]);
3198 if (lif
->rxqcqs
&& lif
->rxqcqs
[i
]) {
3199 lif
->rxqcqs
[i
]->flags
&= ~IONIC_QCQ_F_INTR
;
3200 ionic_qcq_free(lif
, lif
->rxqcqs
[i
]);
3205 netdev_info(lif
->netdev
, "%s: failed %d\n", __func__
, err
);
3210 int ionic_lif_alloc(struct ionic
*ionic
)
3212 struct device
*dev
= ionic
->dev
;
3213 union ionic_lif_identity
*lid
;
3214 struct net_device
*netdev
;
3215 struct ionic_lif
*lif
;
3219 lid
= kzalloc(sizeof(*lid
), GFP_KERNEL
);
3223 netdev
= alloc_etherdev_mqs(sizeof(*lif
),
3224 ionic
->ntxqs_per_lif
, ionic
->ntxqs_per_lif
);
3226 dev_err(dev
, "Cannot allocate netdev, aborting\n");
3228 goto err_out_free_lid
;
3231 SET_NETDEV_DEV(netdev
, dev
);
3233 lif
= netdev_priv(netdev
);
3234 lif
->netdev
= netdev
;
3237 netdev
->netdev_ops
= &ionic_netdev_ops
;
3238 ionic_ethtool_set_ops(netdev
);
3240 netdev
->watchdog_timeo
= 2 * HZ
;
3241 netif_carrier_off(netdev
);
3243 lif
->identity
= lid
;
3244 lif
->lif_type
= IONIC_LIF_TYPE_CLASSIC
;
3245 err
= ionic_lif_identify(ionic
, lif
->lif_type
, lif
->identity
);
3247 dev_err(ionic
->dev
, "Cannot identify type %d: %d\n",
3248 lif
->lif_type
, err
);
3249 goto err_out_free_netdev
;
3251 lif
->netdev
->min_mtu
= max_t(unsigned int, ETH_MIN_MTU
,
3252 le32_to_cpu(lif
->identity
->eth
.min_frame_size
));
3253 lif
->netdev
->max_mtu
=
3254 le32_to_cpu(lif
->identity
->eth
.max_frame_size
) - ETH_HLEN
- VLAN_HLEN
;
3256 lif
->neqs
= ionic
->neqs_per_lif
;
3257 lif
->nxqs
= ionic
->ntxqs_per_lif
;
3261 if (is_kdump_kernel()) {
3262 lif
->ntxq_descs
= IONIC_MIN_TXRX_DESC
;
3263 lif
->nrxq_descs
= IONIC_MIN_TXRX_DESC
;
3265 lif
->ntxq_descs
= IONIC_DEF_TXRX_DESC
;
3266 lif
->nrxq_descs
= IONIC_DEF_TXRX_DESC
;
3269 /* Convert the default coalesce value to actual hw resolution */
3270 lif
->rx_coalesce_usecs
= IONIC_ITR_COAL_USEC_DEFAULT
;
3271 lif
->rx_coalesce_hw
= ionic_coal_usec_to_hw(lif
->ionic
,
3272 lif
->rx_coalesce_usecs
);
3273 lif
->tx_coalesce_usecs
= lif
->rx_coalesce_usecs
;
3274 lif
->tx_coalesce_hw
= lif
->rx_coalesce_hw
;
3275 set_bit(IONIC_LIF_F_RX_DIM_INTR
, lif
->state
);
3276 set_bit(IONIC_LIF_F_TX_DIM_INTR
, lif
->state
);
3278 snprintf(lif
->name
, sizeof(lif
->name
), "lif%u", lif
->index
);
3280 mutex_init(&lif
->queue_lock
);
3281 mutex_init(&lif
->config_lock
);
3283 spin_lock_init(&lif
->adminq_lock
);
3285 spin_lock_init(&lif
->deferred
.lock
);
3286 INIT_LIST_HEAD(&lif
->deferred
.list
);
3287 INIT_WORK(&lif
->deferred
.work
, ionic_lif_deferred_work
);
3289 /* allocate lif info */
3290 lif
->info_sz
= ALIGN(sizeof(*lif
->info
), PAGE_SIZE
);
3291 lif
->info
= dma_alloc_coherent(dev
, lif
->info_sz
,
3292 &lif
->info_pa
, GFP_KERNEL
);
3294 dev_err(dev
, "Failed to allocate lif info, aborting\n");
3296 goto err_out_free_mutex
;
3299 ionic_debugfs_add_lif(lif
);
3301 /* allocate control queues and txrx queue arrays */
3302 ionic_lif_queue_identify(lif
);
3303 err
= ionic_qcqs_alloc(lif
);
3305 goto err_out_free_lif_info
;
3307 /* allocate rss indirection table */
3308 tbl_sz
= le16_to_cpu(lif
->ionic
->ident
.lif
.eth
.rss_ind_tbl_sz
);
3309 lif
->rss_ind_tbl_sz
= sizeof(*lif
->rss_ind_tbl
) * tbl_sz
;
3310 lif
->rss_ind_tbl
= dma_alloc_coherent(dev
, lif
->rss_ind_tbl_sz
,
3311 &lif
->rss_ind_tbl_pa
,
3314 if (!lif
->rss_ind_tbl
) {
3316 dev_err(dev
, "Failed to allocate rss indirection table, aborting\n");
3317 goto err_out_free_qcqs
;
3319 netdev_rss_key_fill(lif
->rss_hash_key
, IONIC_RSS_HASH_KEY_SIZE
);
3321 ionic_lif_alloc_phc(lif
);
3326 ionic_qcqs_free(lif
);
3327 err_out_free_lif_info
:
3328 dma_free_coherent(dev
, lif
->info_sz
, lif
->info
, lif
->info_pa
);
3332 mutex_destroy(&lif
->config_lock
);
3333 mutex_destroy(&lif
->queue_lock
);
3334 err_out_free_netdev
:
3335 free_netdev(lif
->netdev
);
3343 static void ionic_lif_reset(struct ionic_lif
*lif
)
3345 struct ionic_dev
*idev
= &lif
->ionic
->idev
;
3347 if (!ionic_is_fw_running(idev
))
3350 mutex_lock(&lif
->ionic
->dev_cmd_lock
);
3351 ionic_dev_cmd_lif_reset(idev
, lif
->index
);
3352 ionic_dev_cmd_wait(lif
->ionic
, DEVCMD_TIMEOUT
);
3353 mutex_unlock(&lif
->ionic
->dev_cmd_lock
);
3356 static void ionic_lif_handle_fw_down(struct ionic_lif
*lif
)
3358 struct ionic
*ionic
= lif
->ionic
;
3360 if (test_and_set_bit(IONIC_LIF_F_FW_RESET
, lif
->state
))
3363 dev_info(ionic
->dev
, "FW Down: Stopping LIFs\n");
3365 netif_device_detach(lif
->netdev
);
3367 mutex_lock(&lif
->queue_lock
);
3368 if (test_bit(IONIC_LIF_F_UP
, lif
->state
)) {
3369 dev_info(ionic
->dev
, "Surprise FW stop, stopping queues\n");
3370 ionic_stop_queues(lif
);
3373 if (netif_running(lif
->netdev
)) {
3374 ionic_txrx_deinit(lif
);
3375 ionic_txrx_free(lif
);
3377 ionic_lif_deinit(lif
);
3379 ionic_qcqs_free(lif
);
3381 mutex_unlock(&lif
->queue_lock
);
3383 clear_bit(IONIC_LIF_F_FW_STOPPING
, lif
->state
);
3384 dev_info(ionic
->dev
, "FW Down: LIFs stopped\n");
3387 int ionic_restart_lif(struct ionic_lif
*lif
)
3389 struct ionic
*ionic
= lif
->ionic
;
3392 mutex_lock(&lif
->queue_lock
);
3394 if (test_and_clear_bit(IONIC_LIF_F_BROKEN
, lif
->state
))
3395 dev_info(ionic
->dev
, "FW Up: clearing broken state\n");
3397 err
= ionic_qcqs_alloc(lif
);
3401 err
= ionic_lif_init(lif
);
3405 ionic_vf_attr_replay(lif
);
3407 if (lif
->registered
)
3408 ionic_lif_set_netdev_info(lif
);
3410 ionic_rx_filter_replay(lif
);
3412 if (netif_running(lif
->netdev
)) {
3413 err
= ionic_txrx_alloc(lif
);
3415 goto err_lifs_deinit
;
3417 err
= ionic_txrx_init(lif
);
3422 mutex_unlock(&lif
->queue_lock
);
3424 clear_bit(IONIC_LIF_F_FW_RESET
, lif
->state
);
3425 ionic_link_status_check_request(lif
, CAN_SLEEP
);
3426 netif_device_attach(lif
->netdev
);
3431 ionic_txrx_free(lif
);
3433 ionic_lif_deinit(lif
);
3435 ionic_qcqs_free(lif
);
3437 mutex_unlock(&lif
->queue_lock
);
3442 static void ionic_lif_handle_fw_up(struct ionic_lif
*lif
)
3444 struct ionic
*ionic
= lif
->ionic
;
3447 if (!test_bit(IONIC_LIF_F_FW_RESET
, lif
->state
))
3450 dev_info(ionic
->dev
, "FW Up: restarting LIFs\n");
3452 /* This is a little different from what happens at
3453 * probe time because the LIF already exists so we
3454 * just need to reanimate it.
3456 ionic_init_devinfo(ionic
);
3457 err
= ionic_identify(ionic
);
3460 err
= ionic_port_identify(ionic
);
3463 err
= ionic_port_init(ionic
);
3467 err
= ionic_restart_lif(lif
);
3471 dev_info(ionic
->dev
, "FW Up: LIFs restarted\n");
3473 /* restore the hardware timestamping queues */
3474 ionic_lif_hwstamp_replay(lif
);
3479 dev_err(ionic
->dev
, "FW Up: LIFs restart failed - err %d\n", err
);
3482 void ionic_lif_free(struct ionic_lif
*lif
)
3484 struct device
*dev
= lif
->ionic
->dev
;
3486 ionic_lif_free_phc(lif
);
3488 /* free rss indirection table */
3489 dma_free_coherent(dev
, lif
->rss_ind_tbl_sz
, lif
->rss_ind_tbl
,
3490 lif
->rss_ind_tbl_pa
);
3491 lif
->rss_ind_tbl
= NULL
;
3492 lif
->rss_ind_tbl_pa
= 0;
3495 ionic_qcqs_free(lif
);
3496 if (!test_bit(IONIC_LIF_F_FW_RESET
, lif
->state
))
3497 ionic_lif_reset(lif
);
3500 kfree(lif
->identity
);
3501 dma_free_coherent(dev
, lif
->info_sz
, lif
->info
, lif
->info_pa
);
3505 /* unmap doorbell page */
3506 ionic_bus_unmap_dbpage(lif
->ionic
, lif
->kern_dbpage
);
3507 lif
->kern_dbpage
= NULL
;
3509 mutex_destroy(&lif
->config_lock
);
3510 mutex_destroy(&lif
->queue_lock
);
3512 /* free netdev & lif */
3513 ionic_debugfs_del_lif(lif
);
3514 free_netdev(lif
->netdev
);
3517 void ionic_lif_deinit(struct ionic_lif
*lif
)
3519 if (!test_and_clear_bit(IONIC_LIF_F_INITED
, lif
->state
))
3522 if (!test_bit(IONIC_LIF_F_FW_RESET
, lif
->state
)) {
3523 cancel_work_sync(&lif
->deferred
.work
);
3524 cancel_work_sync(&lif
->tx_timeout_work
);
3525 ionic_rx_filters_deinit(lif
);
3526 if (lif
->netdev
->features
& NETIF_F_RXHASH
)
3527 ionic_lif_rss_deinit(lif
);
3530 napi_disable(&lif
->adminqcq
->napi
);
3531 ionic_lif_qcq_deinit(lif
, lif
->notifyqcq
);
3532 ionic_lif_qcq_deinit(lif
, lif
->adminqcq
);
3534 ionic_lif_reset(lif
);
3537 static int ionic_lif_adminq_init(struct ionic_lif
*lif
)
3539 struct device
*dev
= lif
->ionic
->dev
;
3540 struct ionic_q_init_comp comp
;
3541 struct ionic_dev
*idev
;
3542 struct ionic_qcq
*qcq
;
3543 struct ionic_queue
*q
;
3546 idev
= &lif
->ionic
->idev
;
3547 qcq
= lif
->adminqcq
;
3550 mutex_lock(&lif
->ionic
->dev_cmd_lock
);
3551 ionic_dev_cmd_adminq_init(idev
, qcq
, lif
->index
, qcq
->intr
.index
);
3552 err
= ionic_dev_cmd_wait(lif
->ionic
, DEVCMD_TIMEOUT
);
3553 ionic_dev_cmd_comp(idev
, (union ionic_dev_cmd_comp
*)&comp
);
3554 mutex_unlock(&lif
->ionic
->dev_cmd_lock
);
3556 netdev_err(lif
->netdev
, "adminq init failed %d\n", err
);
3560 q
->hw_type
= comp
.hw_type
;
3561 q
->hw_index
= le32_to_cpu(comp
.hw_index
);
3562 q
->dbval
= IONIC_DBELL_QID(q
->hw_index
);
3564 dev_dbg(dev
, "adminq->hw_type %d\n", q
->hw_type
);
3565 dev_dbg(dev
, "adminq->hw_index %d\n", q
->hw_index
);
3567 q
->dbell_deadline
= IONIC_ADMIN_DOORBELL_DEADLINE
;
3568 q
->dbell_jiffies
= jiffies
;
3570 netif_napi_add(lif
->netdev
, &qcq
->napi
, ionic_adminq_napi
);
3572 qcq
->napi_qcq
= qcq
;
3573 timer_setup(&qcq
->napi_deadline
, ionic_napi_deadline
, 0);
3575 napi_enable(&qcq
->napi
);
3577 if (qcq
->flags
& IONIC_QCQ_F_INTR
) {
3578 irq_set_affinity_hint(qcq
->intr
.vector
,
3579 &qcq
->intr
.affinity_mask
);
3580 ionic_intr_mask(idev
->intr_ctrl
, qcq
->intr
.index
,
3581 IONIC_INTR_MASK_CLEAR
);
3584 qcq
->flags
|= IONIC_QCQ_F_INITED
;
3589 static int ionic_lif_notifyq_init(struct ionic_lif
*lif
)
3591 struct ionic_qcq
*qcq
= lif
->notifyqcq
;
3592 struct device
*dev
= lif
->ionic
->dev
;
3593 struct ionic_queue
*q
= &qcq
->q
;
3596 struct ionic_admin_ctx ctx
= {
3597 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
3599 .opcode
= IONIC_CMD_Q_INIT
,
3600 .lif_index
= cpu_to_le16(lif
->index
),
3602 .ver
= lif
->qtype_info
[q
->type
].version
,
3603 .index
= cpu_to_le32(q
->index
),
3604 .flags
= cpu_to_le16(IONIC_QINIT_F_IRQ
|
3606 .intr_index
= cpu_to_le16(lif
->adminqcq
->intr
.index
),
3607 .pid
= cpu_to_le16(q
->pid
),
3608 .ring_size
= ilog2(q
->num_descs
),
3609 .ring_base
= cpu_to_le64(q
->base_pa
),
3613 dev_dbg(dev
, "notifyq_init.pid %d\n", ctx
.cmd
.q_init
.pid
);
3614 dev_dbg(dev
, "notifyq_init.index %d\n", ctx
.cmd
.q_init
.index
);
3615 dev_dbg(dev
, "notifyq_init.ring_base 0x%llx\n", ctx
.cmd
.q_init
.ring_base
);
3616 dev_dbg(dev
, "notifyq_init.ring_size %d\n", ctx
.cmd
.q_init
.ring_size
);
3618 err
= ionic_adminq_post_wait(lif
, &ctx
);
3623 q
->hw_type
= ctx
.comp
.q_init
.hw_type
;
3624 q
->hw_index
= le32_to_cpu(ctx
.comp
.q_init
.hw_index
);
3625 q
->dbval
= IONIC_DBELL_QID(q
->hw_index
);
3627 dev_dbg(dev
, "notifyq->hw_type %d\n", q
->hw_type
);
3628 dev_dbg(dev
, "notifyq->hw_index %d\n", q
->hw_index
);
3630 /* preset the callback info */
3631 q
->info
[0].cb_arg
= lif
;
3633 qcq
->flags
|= IONIC_QCQ_F_INITED
;
3638 static int ionic_station_set(struct ionic_lif
*lif
)
3640 struct net_device
*netdev
= lif
->netdev
;
3641 struct ionic_admin_ctx ctx
= {
3642 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
3643 .cmd
.lif_getattr
= {
3644 .opcode
= IONIC_CMD_LIF_GETATTR
,
3645 .index
= cpu_to_le16(lif
->index
),
3646 .attr
= IONIC_LIF_ATTR_MAC
,
3649 u8 mac_address
[ETH_ALEN
];
3650 struct sockaddr addr
;
3653 err
= ionic_adminq_post_wait(lif
, &ctx
);
3656 netdev_dbg(lif
->netdev
, "found initial MAC addr %pM\n",
3657 ctx
.comp
.lif_getattr
.mac
);
3658 ether_addr_copy(mac_address
, ctx
.comp
.lif_getattr
.mac
);
3660 if (is_zero_ether_addr(mac_address
)) {
3661 eth_hw_addr_random(netdev
);
3662 netdev_dbg(netdev
, "Random Mac generated: %pM\n", netdev
->dev_addr
);
3663 ether_addr_copy(mac_address
, netdev
->dev_addr
);
3665 err
= ionic_program_mac(lif
, mac_address
);
3670 netdev_dbg(netdev
, "%s:SET/GET ATTR Mac are not same-due to old FW running\n",
3676 if (!is_zero_ether_addr(netdev
->dev_addr
)) {
3677 /* If the netdev mac is non-zero and doesn't match the default
3678 * device address, it was set by something earlier and we're
3679 * likely here again after a fw-upgrade reset. We need to be
3680 * sure the netdev mac is in our filter list.
3682 if (!ether_addr_equal(mac_address
, netdev
->dev_addr
))
3683 ionic_lif_addr_add(lif
, netdev
->dev_addr
);
3685 /* Update the netdev mac with the device's mac */
3686 ether_addr_copy(addr
.sa_data
, mac_address
);
3687 addr
.sa_family
= AF_INET
;
3688 err
= eth_prepare_mac_addr_change(netdev
, &addr
);
3690 netdev_warn(lif
->netdev
, "ignoring bad MAC addr from NIC %pM - err %d\n",
3695 eth_commit_mac_addr_change(netdev
, &addr
);
3698 netdev_dbg(lif
->netdev
, "adding station MAC addr %pM\n",
3700 ionic_lif_addr_add(lif
, netdev
->dev_addr
);
3705 int ionic_lif_init(struct ionic_lif
*lif
)
3707 struct ionic_dev
*idev
= &lif
->ionic
->idev
;
3708 struct device
*dev
= lif
->ionic
->dev
;
3709 struct ionic_lif_init_comp comp
;
3713 mutex_lock(&lif
->ionic
->dev_cmd_lock
);
3714 ionic_dev_cmd_lif_init(idev
, lif
->index
, lif
->info_pa
);
3715 err
= ionic_dev_cmd_wait(lif
->ionic
, DEVCMD_TIMEOUT
);
3716 ionic_dev_cmd_comp(idev
, (union ionic_dev_cmd_comp
*)&comp
);
3717 mutex_unlock(&lif
->ionic
->dev_cmd_lock
);
3721 lif
->hw_index
= le16_to_cpu(comp
.hw_index
);
3723 /* now that we have the hw_index we can figure out our doorbell page */
3724 lif
->dbid_count
= le32_to_cpu(lif
->ionic
->ident
.dev
.ndbpgs_per_lif
);
3725 if (!lif
->dbid_count
) {
3726 dev_err(dev
, "No doorbell pages, aborting\n");
3731 dbpage_num
= ionic_db_page_num(lif
, lif
->kern_pid
);
3732 lif
->kern_dbpage
= ionic_bus_map_dbpage(lif
->ionic
, dbpage_num
);
3733 if (!lif
->kern_dbpage
) {
3734 dev_err(dev
, "Cannot map dbpage, aborting\n");
3738 err
= ionic_lif_adminq_init(lif
);
3740 goto err_out_adminq_deinit
;
3742 if (lif
->ionic
->nnqs_per_lif
) {
3743 err
= ionic_lif_notifyq_init(lif
);
3745 goto err_out_notifyq_deinit
;
3748 if (test_bit(IONIC_LIF_F_FW_RESET
, lif
->state
))
3749 err
= ionic_set_nic_features(lif
, lif
->netdev
->features
);
3751 err
= ionic_init_nic_features(lif
);
3753 goto err_out_notifyq_deinit
;
3755 if (!test_bit(IONIC_LIF_F_FW_RESET
, lif
->state
)) {
3756 err
= ionic_rx_filters_init(lif
);
3758 goto err_out_notifyq_deinit
;
3761 err
= ionic_station_set(lif
);
3763 goto err_out_notifyq_deinit
;
3765 lif
->rx_copybreak
= IONIC_RX_COPYBREAK_DEFAULT
;
3767 set_bit(IONIC_LIF_F_INITED
, lif
->state
);
3769 INIT_WORK(&lif
->tx_timeout_work
, ionic_tx_timeout_work
);
3773 err_out_notifyq_deinit
:
3774 napi_disable(&lif
->adminqcq
->napi
);
3775 ionic_lif_qcq_deinit(lif
, lif
->notifyqcq
);
3776 err_out_adminq_deinit
:
3777 ionic_lif_qcq_deinit(lif
, lif
->adminqcq
);
3778 ionic_lif_reset(lif
);
3779 ionic_bus_unmap_dbpage(lif
->ionic
, lif
->kern_dbpage
);
3780 lif
->kern_dbpage
= NULL
;
/* Placeholder work handler for the netdev notifier; currently a no-op.
 * NOTE(review): body not visible in this chunk - assumed empty stub,
 * matching the INIT_WORK() user in ionic_lif_register(). Confirm
 * against the full file before relying on this.
 */
static void ionic_lif_notify_work(struct work_struct *ws)
{
}
3789 static void ionic_lif_set_netdev_info(struct ionic_lif
*lif
)
3791 struct ionic_admin_ctx ctx
= {
3792 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
3793 .cmd
.lif_setattr
= {
3794 .opcode
= IONIC_CMD_LIF_SETATTR
,
3795 .index
= cpu_to_le16(lif
->index
),
3796 .attr
= IONIC_LIF_ATTR_NAME
,
3800 strscpy(ctx
.cmd
.lif_setattr
.name
, lif
->netdev
->name
,
3801 sizeof(ctx
.cmd
.lif_setattr
.name
));
3803 ionic_adminq_post_wait(lif
, &ctx
);
3806 static struct ionic_lif
*ionic_netdev_lif(struct net_device
*netdev
)
3808 if (!netdev
|| netdev
->netdev_ops
->ndo_start_xmit
!= ionic_start_xmit
)
3811 return netdev_priv(netdev
);
3814 static int ionic_lif_notify(struct notifier_block
*nb
,
3815 unsigned long event
, void *info
)
3817 struct net_device
*ndev
= netdev_notifier_info_to_dev(info
);
3818 struct ionic
*ionic
= container_of(nb
, struct ionic
, nb
);
3819 struct ionic_lif
*lif
= ionic_netdev_lif(ndev
);
3821 if (!lif
|| lif
->ionic
!= ionic
)
3825 case NETDEV_CHANGENAME
:
3826 ionic_lif_set_netdev_info(lif
);
3833 int ionic_lif_register(struct ionic_lif
*lif
)
3837 ionic_lif_register_phc(lif
);
3839 INIT_WORK(&lif
->ionic
->nb_work
, ionic_lif_notify_work
);
3841 lif
->ionic
->nb
.notifier_call
= ionic_lif_notify
;
3843 err
= register_netdevice_notifier(&lif
->ionic
->nb
);
3845 lif
->ionic
->nb
.notifier_call
= NULL
;
3847 /* only register LIF0 for now */
3848 err
= register_netdev(lif
->netdev
);
3850 dev_err(lif
->ionic
->dev
, "Cannot register net device, aborting\n");
3851 ionic_lif_unregister_phc(lif
);
3855 ionic_link_status_check_request(lif
, CAN_SLEEP
);
3856 lif
->registered
= true;
3857 ionic_lif_set_netdev_info(lif
);
3862 void ionic_lif_unregister(struct ionic_lif
*lif
)
3864 if (lif
->ionic
->nb
.notifier_call
) {
3865 unregister_netdevice_notifier(&lif
->ionic
->nb
);
3866 cancel_work_sync(&lif
->ionic
->nb_work
);
3867 lif
->ionic
->nb
.notifier_call
= NULL
;
3870 if (lif
->netdev
->reg_state
== NETREG_REGISTERED
)
3871 unregister_netdev(lif
->netdev
);
3873 ionic_lif_unregister_phc(lif
);
3875 lif
->registered
= false;
3878 static void ionic_lif_queue_identify(struct ionic_lif
*lif
)
3880 union ionic_q_identity __iomem
*q_ident
;
3881 struct ionic
*ionic
= lif
->ionic
;
3882 struct ionic_dev
*idev
;
3886 idev
= &lif
->ionic
->idev
;
3887 q_ident
= (union ionic_q_identity __iomem
*)&idev
->dev_cmd_regs
->data
;
3889 for (qtype
= 0; qtype
< ARRAY_SIZE(ionic_qtype_versions
); qtype
++) {
3890 struct ionic_qtype_info
*qti
= &lif
->qtype_info
[qtype
];
3892 /* filter out the ones we know about */
3894 case IONIC_QTYPE_ADMINQ
:
3895 case IONIC_QTYPE_NOTIFYQ
:
3896 case IONIC_QTYPE_RXQ
:
3897 case IONIC_QTYPE_TXQ
:
3903 memset(qti
, 0, sizeof(*qti
));
3905 mutex_lock(&ionic
->dev_cmd_lock
);
3906 ionic_dev_cmd_queue_identify(idev
, lif
->lif_type
, qtype
,
3907 ionic_qtype_versions
[qtype
]);
3908 err
= ionic_dev_cmd_wait(ionic
, DEVCMD_TIMEOUT
);
3910 qti
->version
= readb(&q_ident
->version
);
3911 qti
->supported
= readb(&q_ident
->supported
);
3912 qti
->features
= readq(&q_ident
->features
);
3913 qti
->desc_sz
= readw(&q_ident
->desc_sz
);
3914 qti
->comp_sz
= readw(&q_ident
->comp_sz
);
3915 qti
->sg_desc_sz
= readw(&q_ident
->sg_desc_sz
);
3916 qti
->max_sg_elems
= readw(&q_ident
->max_sg_elems
);
3917 qti
->sg_desc_stride
= readw(&q_ident
->sg_desc_stride
);
3919 mutex_unlock(&ionic
->dev_cmd_lock
);
3921 if (err
== -EINVAL
) {
3922 dev_err(ionic
->dev
, "qtype %d not supported\n", qtype
);
3924 } else if (err
== -EIO
) {
3925 dev_err(ionic
->dev
, "q_ident failed, not supported on older FW\n");
3928 dev_err(ionic
->dev
, "q_ident failed, qtype %d: %d\n",
3933 dev_dbg(ionic
->dev
, " qtype[%d].version = %d\n",
3934 qtype
, qti
->version
);
3935 dev_dbg(ionic
->dev
, " qtype[%d].supported = 0x%02x\n",
3936 qtype
, qti
->supported
);
3937 dev_dbg(ionic
->dev
, " qtype[%d].features = 0x%04llx\n",
3938 qtype
, qti
->features
);
3939 dev_dbg(ionic
->dev
, " qtype[%d].desc_sz = %d\n",
3940 qtype
, qti
->desc_sz
);
3941 dev_dbg(ionic
->dev
, " qtype[%d].comp_sz = %d\n",
3942 qtype
, qti
->comp_sz
);
3943 dev_dbg(ionic
->dev
, " qtype[%d].sg_desc_sz = %d\n",
3944 qtype
, qti
->sg_desc_sz
);
3945 dev_dbg(ionic
->dev
, " qtype[%d].max_sg_elems = %d\n",
3946 qtype
, qti
->max_sg_elems
);
3947 dev_dbg(ionic
->dev
, " qtype[%d].sg_desc_stride = %d\n",
3948 qtype
, qti
->sg_desc_stride
);
3950 if (qti
->max_sg_elems
>= IONIC_MAX_FRAGS
) {
3951 qti
->max_sg_elems
= IONIC_MAX_FRAGS
- 1;
3952 dev_dbg(ionic
->dev
, "limiting qtype %d max_sg_elems to IONIC_MAX_FRAGS-1 %d\n",
3953 qtype
, qti
->max_sg_elems
);
3956 if (qti
->max_sg_elems
> MAX_SKB_FRAGS
) {
3957 qti
->max_sg_elems
= MAX_SKB_FRAGS
;
3958 dev_dbg(ionic
->dev
, "limiting qtype %d max_sg_elems to MAX_SKB_FRAGS %d\n",
3959 qtype
, qti
->max_sg_elems
);
3964 int ionic_lif_identify(struct ionic
*ionic
, u8 lif_type
,
3965 union ionic_lif_identity
*lid
)
3967 struct ionic_dev
*idev
= &ionic
->idev
;
3971 sz
= min(sizeof(*lid
), sizeof(idev
->dev_cmd_regs
->data
));
3973 mutex_lock(&ionic
->dev_cmd_lock
);
3974 ionic_dev_cmd_lif_identify(idev
, lif_type
, IONIC_IDENTITY_VERSION_1
);
3975 err
= ionic_dev_cmd_wait(ionic
, DEVCMD_TIMEOUT
);
3976 memcpy_fromio(lid
, &idev
->dev_cmd_regs
->data
, sz
);
3977 mutex_unlock(&ionic
->dev_cmd_lock
);
3981 dev_dbg(ionic
->dev
, "capabilities 0x%llx\n",
3982 le64_to_cpu(lid
->capabilities
));
3984 dev_dbg(ionic
->dev
, "eth.max_ucast_filters %d\n",
3985 le32_to_cpu(lid
->eth
.max_ucast_filters
));
3986 dev_dbg(ionic
->dev
, "eth.max_mcast_filters %d\n",
3987 le32_to_cpu(lid
->eth
.max_mcast_filters
));
3988 dev_dbg(ionic
->dev
, "eth.features 0x%llx\n",
3989 le64_to_cpu(lid
->eth
.config
.features
));
3990 dev_dbg(ionic
->dev
, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
3991 le32_to_cpu(lid
->eth
.config
.queue_count
[IONIC_QTYPE_ADMINQ
]));
3992 dev_dbg(ionic
->dev
, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
3993 le32_to_cpu(lid
->eth
.config
.queue_count
[IONIC_QTYPE_NOTIFYQ
]));
3994 dev_dbg(ionic
->dev
, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
3995 le32_to_cpu(lid
->eth
.config
.queue_count
[IONIC_QTYPE_RXQ
]));
3996 dev_dbg(ionic
->dev
, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
3997 le32_to_cpu(lid
->eth
.config
.queue_count
[IONIC_QTYPE_TXQ
]));
3998 dev_dbg(ionic
->dev
, "eth.config.name %s\n", lid
->eth
.config
.name
);
3999 dev_dbg(ionic
->dev
, "eth.config.mac %pM\n", lid
->eth
.config
.mac
);
4000 dev_dbg(ionic
->dev
, "eth.config.mtu %d\n",
4001 le32_to_cpu(lid
->eth
.config
.mtu
));
4006 int ionic_lif_size(struct ionic
*ionic
)
4008 struct ionic_identity
*ident
= &ionic
->ident
;
4009 unsigned int nintrs
, dev_nintrs
;
4010 union ionic_lif_config
*lc
;
4011 unsigned int ntxqs_per_lif
;
4012 unsigned int nrxqs_per_lif
;
4013 unsigned int neqs_per_lif
;
4014 unsigned int nnqs_per_lif
;
4015 unsigned int nxqs
, neqs
;
4016 unsigned int min_intrs
;
4019 /* retrieve basic values from FW */
4020 lc
= &ident
->lif
.eth
.config
;
4021 dev_nintrs
= le32_to_cpu(ident
->dev
.nintrs
);
4022 neqs_per_lif
= le32_to_cpu(ident
->lif
.rdma
.eq_qtype
.qid_count
);
4023 nnqs_per_lif
= le32_to_cpu(lc
->queue_count
[IONIC_QTYPE_NOTIFYQ
]);
4024 ntxqs_per_lif
= le32_to_cpu(lc
->queue_count
[IONIC_QTYPE_TXQ
]);
4025 nrxqs_per_lif
= le32_to_cpu(lc
->queue_count
[IONIC_QTYPE_RXQ
]);
4027 /* limit values to play nice with kdump */
4028 if (is_kdump_kernel()) {
4036 /* reserve last queue id for hardware timestamping */
4037 if (lc
->features
& cpu_to_le64(IONIC_ETH_HW_TIMESTAMP
)) {
4038 if (ntxqs_per_lif
<= 1 || nrxqs_per_lif
<= 1) {
4039 lc
->features
&= cpu_to_le64(~IONIC_ETH_HW_TIMESTAMP
);
4046 nxqs
= min(ntxqs_per_lif
, nrxqs_per_lif
);
4047 nxqs
= min(nxqs
, num_online_cpus());
4048 neqs
= min(neqs_per_lif
, num_online_cpus());
4052 * 1 for master lif adminq/notifyq
4053 * 1 for each CPU for master lif TxRx queue pairs
4054 * whatever's left is for RDMA queues
4056 nintrs
= 1 + nxqs
+ neqs
;
4057 min_intrs
= 2; /* adminq + 1 TxRx queue pair */
4059 if (nintrs
> dev_nintrs
)
4062 err
= ionic_bus_alloc_irq_vectors(ionic
, nintrs
);
4063 if (err
< 0 && err
!= -ENOSPC
) {
4064 dev_err(ionic
->dev
, "Can't get intrs from OS: %d\n", err
);
4070 if (err
!= nintrs
) {
4071 ionic_bus_free_irq_vectors(ionic
);
4075 ionic
->nnqs_per_lif
= nnqs_per_lif
;
4076 ionic
->neqs_per_lif
= neqs
;
4077 ionic
->ntxqs_per_lif
= nxqs
;
4078 ionic
->nrxqs_per_lif
= nxqs
;
4079 ionic
->nintrs
= nintrs
;
4081 ionic_debugfs_add_sizes(ionic
);
4086 if (nnqs_per_lif
> 1) {
4098 dev_err(ionic
->dev
, "Can't get minimum %d intrs from OS\n", min_intrs
);