1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
4 #include <linux/printk.h>
5 #include <linux/dynamic_debug.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/rtnetlink.h>
9 #include <linux/interrupt.h>
10 #include <linux/pci.h>
11 #include <linux/cpumask.h>
14 #include "ionic_bus.h"
15 #include "ionic_lif.h"
16 #include "ionic_txrx.h"
17 #include "ionic_ethtool.h"
18 #include "ionic_debugfs.h"
20 static void ionic_lif_rx_mode(struct ionic_lif
*lif
, unsigned int rx_mode
);
21 static int ionic_lif_addr_add(struct ionic_lif
*lif
, const u8
*addr
);
22 static int ionic_lif_addr_del(struct ionic_lif
*lif
, const u8
*addr
);
23 static void ionic_link_status_check(struct ionic_lif
*lif
);
24 static void ionic_lif_handle_fw_down(struct ionic_lif
*lif
);
25 static void ionic_lif_handle_fw_up(struct ionic_lif
*lif
);
26 static void ionic_lif_set_netdev_info(struct ionic_lif
*lif
);
28 static int ionic_start_queues(struct ionic_lif
*lif
);
29 static void ionic_stop_queues(struct ionic_lif
*lif
);
31 static void ionic_lif_deferred_work(struct work_struct
*work
)
33 struct ionic_lif
*lif
= container_of(work
, struct ionic_lif
, deferred
.work
);
34 struct ionic_deferred
*def
= &lif
->deferred
;
35 struct ionic_deferred_work
*w
= NULL
;
37 spin_lock_bh(&def
->lock
);
38 if (!list_empty(&def
->list
)) {
39 w
= list_first_entry(&def
->list
,
40 struct ionic_deferred_work
, list
);
43 spin_unlock_bh(&def
->lock
);
47 case IONIC_DW_TYPE_RX_MODE
:
48 ionic_lif_rx_mode(lif
, w
->rx_mode
);
50 case IONIC_DW_TYPE_RX_ADDR_ADD
:
51 ionic_lif_addr_add(lif
, w
->addr
);
53 case IONIC_DW_TYPE_RX_ADDR_DEL
:
54 ionic_lif_addr_del(lif
, w
->addr
);
56 case IONIC_DW_TYPE_LINK_STATUS
:
57 ionic_link_status_check(lif
);
59 case IONIC_DW_TYPE_LIF_RESET
:
61 ionic_lif_handle_fw_up(lif
);
63 ionic_lif_handle_fw_down(lif
);
69 schedule_work(&def
->work
);
73 void ionic_lif_deferred_enqueue(struct ionic_deferred
*def
,
74 struct ionic_deferred_work
*work
)
76 spin_lock_bh(&def
->lock
);
77 list_add_tail(&work
->list
, &def
->list
);
78 spin_unlock_bh(&def
->lock
);
79 schedule_work(&def
->work
);
82 static void ionic_link_status_check(struct ionic_lif
*lif
)
84 struct net_device
*netdev
= lif
->netdev
;
88 if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED
, lif
->state
))
91 if (lif
->ionic
->is_mgmt_nic
)
94 link_status
= le16_to_cpu(lif
->info
->status
.link_status
);
95 link_up
= link_status
== IONIC_PORT_OPER_STATUS_UP
;
98 if (!netif_carrier_ok(netdev
)) {
101 ionic_port_identify(lif
->ionic
);
102 link_speed
= le32_to_cpu(lif
->info
->status
.link_speed
);
103 netdev_info(netdev
, "Link up - %d Gbps\n",
105 netif_carrier_on(netdev
);
108 if (netif_running(lif
->netdev
))
109 ionic_start_queues(lif
);
111 if (netif_carrier_ok(netdev
)) {
112 netdev_info(netdev
, "Link down\n");
113 netif_carrier_off(netdev
);
116 if (netif_running(lif
->netdev
))
117 ionic_stop_queues(lif
);
120 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED
, lif
->state
);
123 void ionic_link_status_check_request(struct ionic_lif
*lif
)
125 struct ionic_deferred_work
*work
;
127 /* we only need one request outstanding at a time */
128 if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED
, lif
->state
))
131 if (in_interrupt()) {
132 work
= kzalloc(sizeof(*work
), GFP_ATOMIC
);
136 work
->type
= IONIC_DW_TYPE_LINK_STATUS
;
137 ionic_lif_deferred_enqueue(&lif
->deferred
, work
);
139 ionic_link_status_check(lif
);
143 static irqreturn_t
ionic_isr(int irq
, void *data
)
145 struct napi_struct
*napi
= data
;
147 napi_schedule_irqoff(napi
);
152 static int ionic_request_irq(struct ionic_lif
*lif
, struct ionic_qcq
*qcq
)
154 struct ionic_intr_info
*intr
= &qcq
->intr
;
155 struct device
*dev
= lif
->ionic
->dev
;
156 struct ionic_queue
*q
= &qcq
->q
;
160 name
= lif
->netdev
->name
;
162 name
= dev_name(dev
);
164 snprintf(intr
->name
, sizeof(intr
->name
),
165 "%s-%s-%s", IONIC_DRV_NAME
, name
, q
->name
);
167 return devm_request_irq(dev
, intr
->vector
, ionic_isr
,
168 0, intr
->name
, &qcq
->napi
);
171 static int ionic_intr_alloc(struct ionic_lif
*lif
, struct ionic_intr_info
*intr
)
173 struct ionic
*ionic
= lif
->ionic
;
176 index
= find_first_zero_bit(ionic
->intrs
, ionic
->nintrs
);
177 if (index
== ionic
->nintrs
) {
178 netdev_warn(lif
->netdev
, "%s: no intr, index=%d nintrs=%d\n",
179 __func__
, index
, ionic
->nintrs
);
183 set_bit(index
, ionic
->intrs
);
184 ionic_intr_init(&ionic
->idev
, intr
, index
);
189 static void ionic_intr_free(struct ionic_lif
*lif
, int index
)
191 if (index
!= INTR_INDEX_NOT_ASSIGNED
&& index
< lif
->ionic
->nintrs
)
192 clear_bit(index
, lif
->ionic
->intrs
);
195 static int ionic_qcq_enable(struct ionic_qcq
*qcq
)
197 struct ionic_queue
*q
= &qcq
->q
;
198 struct ionic_lif
*lif
= q
->lif
;
199 struct ionic_dev
*idev
;
202 struct ionic_admin_ctx ctx
= {
203 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
205 .opcode
= IONIC_CMD_Q_CONTROL
,
206 .lif_index
= cpu_to_le16(lif
->index
),
208 .index
= cpu_to_le32(q
->index
),
209 .oper
= IONIC_Q_ENABLE
,
213 idev
= &lif
->ionic
->idev
;
214 dev
= lif
->ionic
->dev
;
216 dev_dbg(dev
, "q_enable.index %d q_enable.qtype %d\n",
217 ctx
.cmd
.q_control
.index
, ctx
.cmd
.q_control
.type
);
219 if (qcq
->flags
& IONIC_QCQ_F_INTR
) {
220 irq_set_affinity_hint(qcq
->intr
.vector
,
221 &qcq
->intr
.affinity_mask
);
222 napi_enable(&qcq
->napi
);
223 ionic_intr_clean(idev
->intr_ctrl
, qcq
->intr
.index
);
224 ionic_intr_mask(idev
->intr_ctrl
, qcq
->intr
.index
,
225 IONIC_INTR_MASK_CLEAR
);
228 return ionic_adminq_post_wait(lif
, &ctx
);
231 static int ionic_qcq_disable(struct ionic_qcq
*qcq
)
233 struct ionic_queue
*q
= &qcq
->q
;
234 struct ionic_lif
*lif
= q
->lif
;
235 struct ionic_dev
*idev
;
238 struct ionic_admin_ctx ctx
= {
239 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
241 .opcode
= IONIC_CMD_Q_CONTROL
,
242 .lif_index
= cpu_to_le16(lif
->index
),
244 .index
= cpu_to_le32(q
->index
),
245 .oper
= IONIC_Q_DISABLE
,
249 idev
= &lif
->ionic
->idev
;
250 dev
= lif
->ionic
->dev
;
252 dev_dbg(dev
, "q_disable.index %d q_disable.qtype %d\n",
253 ctx
.cmd
.q_control
.index
, ctx
.cmd
.q_control
.type
);
255 if (qcq
->flags
& IONIC_QCQ_F_INTR
) {
256 ionic_intr_mask(idev
->intr_ctrl
, qcq
->intr
.index
,
257 IONIC_INTR_MASK_SET
);
258 synchronize_irq(qcq
->intr
.vector
);
259 irq_set_affinity_hint(qcq
->intr
.vector
, NULL
);
260 napi_disable(&qcq
->napi
);
263 return ionic_adminq_post_wait(lif
, &ctx
);
266 static void ionic_lif_qcq_deinit(struct ionic_lif
*lif
, struct ionic_qcq
*qcq
)
268 struct ionic_dev
*idev
= &lif
->ionic
->idev
;
273 if (!(qcq
->flags
& IONIC_QCQ_F_INITED
))
276 if (qcq
->flags
& IONIC_QCQ_F_INTR
) {
277 ionic_intr_mask(idev
->intr_ctrl
, qcq
->intr
.index
,
278 IONIC_INTR_MASK_SET
);
279 netif_napi_del(&qcq
->napi
);
282 qcq
->flags
&= ~IONIC_QCQ_F_INITED
;
285 static void ionic_qcq_free(struct ionic_lif
*lif
, struct ionic_qcq
*qcq
)
287 struct device
*dev
= lif
->ionic
->dev
;
292 ionic_debugfs_del_qcq(qcq
);
294 dma_free_coherent(dev
, qcq
->total_size
, qcq
->base
, qcq
->base_pa
);
298 if (qcq
->flags
& IONIC_QCQ_F_INTR
) {
299 irq_set_affinity_hint(qcq
->intr
.vector
, NULL
);
300 devm_free_irq(dev
, qcq
->intr
.vector
, &qcq
->napi
);
301 qcq
->intr
.vector
= 0;
302 ionic_intr_free(lif
, qcq
->intr
.index
);
305 devm_kfree(dev
, qcq
->cq
.info
);
307 devm_kfree(dev
, qcq
->q
.info
);
309 devm_kfree(dev
, qcq
);
312 static void ionic_qcqs_free(struct ionic_lif
*lif
)
314 struct device
*dev
= lif
->ionic
->dev
;
317 if (lif
->notifyqcq
) {
318 ionic_qcq_free(lif
, lif
->notifyqcq
);
319 lif
->notifyqcq
= NULL
;
323 ionic_qcq_free(lif
, lif
->adminqcq
);
324 lif
->adminqcq
= NULL
;
328 for (i
= 0; i
< lif
->nxqs
; i
++)
329 if (lif
->rxqcqs
[i
].stats
)
330 devm_kfree(dev
, lif
->rxqcqs
[i
].stats
);
331 devm_kfree(dev
, lif
->rxqcqs
);
336 for (i
= 0; i
< lif
->nxqs
; i
++)
337 if (lif
->txqcqs
[i
].stats
)
338 devm_kfree(dev
, lif
->txqcqs
[i
].stats
);
339 devm_kfree(dev
, lif
->txqcqs
);
344 static void ionic_link_qcq_interrupts(struct ionic_qcq
*src_qcq
,
345 struct ionic_qcq
*n_qcq
)
347 if (WARN_ON(n_qcq
->flags
& IONIC_QCQ_F_INTR
)) {
348 ionic_intr_free(n_qcq
->cq
.lif
, n_qcq
->intr
.index
);
349 n_qcq
->flags
&= ~IONIC_QCQ_F_INTR
;
352 n_qcq
->intr
.vector
= src_qcq
->intr
.vector
;
353 n_qcq
->intr
.index
= src_qcq
->intr
.index
;
356 static int ionic_qcq_alloc(struct ionic_lif
*lif
, unsigned int type
,
358 const char *name
, unsigned int flags
,
359 unsigned int num_descs
, unsigned int desc_size
,
360 unsigned int cq_desc_size
,
361 unsigned int sg_desc_size
,
362 unsigned int pid
, struct ionic_qcq
**qcq
)
364 struct ionic_dev
*idev
= &lif
->ionic
->idev
;
365 u32 q_size
, cq_size
, sg_size
, total_size
;
366 struct device
*dev
= lif
->ionic
->dev
;
367 void *q_base
, *cq_base
, *sg_base
;
368 dma_addr_t cq_base_pa
= 0;
369 dma_addr_t sg_base_pa
= 0;
370 dma_addr_t q_base_pa
= 0;
371 struct ionic_qcq
*new;
376 q_size
= num_descs
* desc_size
;
377 cq_size
= num_descs
* cq_desc_size
;
378 sg_size
= num_descs
* sg_desc_size
;
380 total_size
= ALIGN(q_size
, PAGE_SIZE
) + ALIGN(cq_size
, PAGE_SIZE
);
381 /* Note: aligning q_size/cq_size is not enough due to cq_base
382 * address aligning as q_base could be not aligned to the page.
385 total_size
+= PAGE_SIZE
;
386 if (flags
& IONIC_QCQ_F_SG
) {
387 total_size
+= ALIGN(sg_size
, PAGE_SIZE
);
388 total_size
+= PAGE_SIZE
;
391 new = devm_kzalloc(dev
, sizeof(*new), GFP_KERNEL
);
393 netdev_err(lif
->netdev
, "Cannot allocate queue structure\n");
400 new->q
.info
= devm_kzalloc(dev
, sizeof(*new->q
.info
) * num_descs
,
403 netdev_err(lif
->netdev
, "Cannot allocate queue info\n");
410 err
= ionic_q_init(lif
, idev
, &new->q
, index
, name
, num_descs
,
411 desc_size
, sg_desc_size
, pid
);
413 netdev_err(lif
->netdev
, "Cannot initialize queue\n");
417 if (flags
& IONIC_QCQ_F_INTR
) {
418 err
= ionic_intr_alloc(lif
, &new->intr
);
420 netdev_warn(lif
->netdev
, "no intr for %s: %d\n",
425 err
= ionic_bus_get_irq(lif
->ionic
, new->intr
.index
);
427 netdev_warn(lif
->netdev
, "no vector for %s: %d\n",
429 goto err_out_free_intr
;
431 new->intr
.vector
= err
;
432 ionic_intr_mask_assert(idev
->intr_ctrl
, new->intr
.index
,
433 IONIC_INTR_MASK_SET
);
435 err
= ionic_request_irq(lif
, new);
437 netdev_warn(lif
->netdev
, "irq request failed %d\n", err
);
438 goto err_out_free_intr
;
441 new->intr
.cpu
= cpumask_local_spread(new->intr
.index
,
443 if (new->intr
.cpu
!= -1)
444 cpumask_set_cpu(new->intr
.cpu
,
445 &new->intr
.affinity_mask
);
447 new->intr
.index
= INTR_INDEX_NOT_ASSIGNED
;
450 new->cq
.info
= devm_kzalloc(dev
, sizeof(*new->cq
.info
) * num_descs
,
453 netdev_err(lif
->netdev
, "Cannot allocate completion queue info\n");
455 goto err_out_free_irq
;
458 err
= ionic_cq_init(lif
, &new->cq
, &new->intr
, num_descs
, cq_desc_size
);
460 netdev_err(lif
->netdev
, "Cannot initialize completion queue\n");
461 goto err_out_free_irq
;
464 new->base
= dma_alloc_coherent(dev
, total_size
, &new->base_pa
,
467 netdev_err(lif
->netdev
, "Cannot allocate queue DMA memory\n");
469 goto err_out_free_irq
;
472 new->total_size
= total_size
;
475 q_base_pa
= new->base_pa
;
477 cq_base
= (void *)ALIGN((uintptr_t)q_base
+ q_size
, PAGE_SIZE
);
478 cq_base_pa
= ALIGN(q_base_pa
+ q_size
, PAGE_SIZE
);
480 if (flags
& IONIC_QCQ_F_SG
) {
481 sg_base
= (void *)ALIGN((uintptr_t)cq_base
+ cq_size
,
483 sg_base_pa
= ALIGN(cq_base_pa
+ cq_size
, PAGE_SIZE
);
484 ionic_q_sg_map(&new->q
, sg_base
, sg_base_pa
);
487 ionic_q_map(&new->q
, q_base
, q_base_pa
);
488 ionic_cq_map(&new->cq
, cq_base
, cq_base_pa
);
489 ionic_cq_bind(&new->cq
, &new->q
);
496 if (flags
& IONIC_QCQ_F_INTR
)
497 devm_free_irq(dev
, new->intr
.vector
, &new->napi
);
499 if (flags
& IONIC_QCQ_F_INTR
)
500 ionic_intr_free(lif
, new->intr
.index
);
502 dev_err(dev
, "qcq alloc of %s%d failed %d\n", name
, index
, err
);
506 static int ionic_qcqs_alloc(struct ionic_lif
*lif
)
508 struct device
*dev
= lif
->ionic
->dev
;
509 unsigned int q_list_size
;
514 flags
= IONIC_QCQ_F_INTR
;
515 err
= ionic_qcq_alloc(lif
, IONIC_QTYPE_ADMINQ
, 0, "admin", flags
,
517 sizeof(struct ionic_admin_cmd
),
518 sizeof(struct ionic_admin_comp
),
519 0, lif
->kern_pid
, &lif
->adminqcq
);
522 ionic_debugfs_add_qcq(lif
, lif
->adminqcq
);
524 if (lif
->ionic
->nnqs_per_lif
) {
525 flags
= IONIC_QCQ_F_NOTIFYQ
;
526 err
= ionic_qcq_alloc(lif
, IONIC_QTYPE_NOTIFYQ
, 0, "notifyq",
527 flags
, IONIC_NOTIFYQ_LENGTH
,
528 sizeof(struct ionic_notifyq_cmd
),
529 sizeof(union ionic_notifyq_comp
),
530 0, lif
->kern_pid
, &lif
->notifyqcq
);
532 goto err_out_free_adminqcq
;
533 ionic_debugfs_add_qcq(lif
, lif
->notifyqcq
);
535 /* Let the notifyq ride on the adminq interrupt */
536 ionic_link_qcq_interrupts(lif
->adminqcq
, lif
->notifyqcq
);
539 q_list_size
= sizeof(*lif
->txqcqs
) * lif
->nxqs
;
541 lif
->txqcqs
= devm_kzalloc(dev
, q_list_size
, GFP_KERNEL
);
543 goto err_out_free_notifyqcq
;
544 for (i
= 0; i
< lif
->nxqs
; i
++) {
545 lif
->txqcqs
[i
].stats
= devm_kzalloc(dev
,
546 sizeof(struct ionic_q_stats
),
548 if (!lif
->txqcqs
[i
].stats
)
549 goto err_out_free_tx_stats
;
552 lif
->rxqcqs
= devm_kzalloc(dev
, q_list_size
, GFP_KERNEL
);
554 goto err_out_free_tx_stats
;
555 for (i
= 0; i
< lif
->nxqs
; i
++) {
556 lif
->rxqcqs
[i
].stats
= devm_kzalloc(dev
,
557 sizeof(struct ionic_q_stats
),
559 if (!lif
->rxqcqs
[i
].stats
)
560 goto err_out_free_rx_stats
;
565 err_out_free_rx_stats
:
566 for (i
= 0; i
< lif
->nxqs
; i
++)
567 if (lif
->rxqcqs
[i
].stats
)
568 devm_kfree(dev
, lif
->rxqcqs
[i
].stats
);
569 devm_kfree(dev
, lif
->rxqcqs
);
571 err_out_free_tx_stats
:
572 for (i
= 0; i
< lif
->nxqs
; i
++)
573 if (lif
->txqcqs
[i
].stats
)
574 devm_kfree(dev
, lif
->txqcqs
[i
].stats
);
575 devm_kfree(dev
, lif
->txqcqs
);
577 err_out_free_notifyqcq
:
578 if (lif
->notifyqcq
) {
579 ionic_qcq_free(lif
, lif
->notifyqcq
);
580 lif
->notifyqcq
= NULL
;
582 err_out_free_adminqcq
:
583 ionic_qcq_free(lif
, lif
->adminqcq
);
584 lif
->adminqcq
= NULL
;
589 static int ionic_lif_txq_init(struct ionic_lif
*lif
, struct ionic_qcq
*qcq
)
591 struct device
*dev
= lif
->ionic
->dev
;
592 struct ionic_queue
*q
= &qcq
->q
;
593 struct ionic_cq
*cq
= &qcq
->cq
;
594 struct ionic_admin_ctx ctx
= {
595 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
597 .opcode
= IONIC_CMD_Q_INIT
,
598 .lif_index
= cpu_to_le16(lif
->index
),
600 .index
= cpu_to_le32(q
->index
),
601 .flags
= cpu_to_le16(IONIC_QINIT_F_IRQ
|
603 .intr_index
= cpu_to_le16(lif
->rxqcqs
[q
->index
].qcq
->intr
.index
),
604 .pid
= cpu_to_le16(q
->pid
),
605 .ring_size
= ilog2(q
->num_descs
),
606 .ring_base
= cpu_to_le64(q
->base_pa
),
607 .cq_ring_base
= cpu_to_le64(cq
->base_pa
),
608 .sg_ring_base
= cpu_to_le64(q
->sg_base_pa
),
613 dev_dbg(dev
, "txq_init.pid %d\n", ctx
.cmd
.q_init
.pid
);
614 dev_dbg(dev
, "txq_init.index %d\n", ctx
.cmd
.q_init
.index
);
615 dev_dbg(dev
, "txq_init.ring_base 0x%llx\n", ctx
.cmd
.q_init
.ring_base
);
616 dev_dbg(dev
, "txq_init.ring_size %d\n", ctx
.cmd
.q_init
.ring_size
);
622 err
= ionic_adminq_post_wait(lif
, &ctx
);
626 q
->hw_type
= ctx
.comp
.q_init
.hw_type
;
627 q
->hw_index
= le32_to_cpu(ctx
.comp
.q_init
.hw_index
);
628 q
->dbval
= IONIC_DBELL_QID(q
->hw_index
);
630 dev_dbg(dev
, "txq->hw_type %d\n", q
->hw_type
);
631 dev_dbg(dev
, "txq->hw_index %d\n", q
->hw_index
);
633 qcq
->flags
|= IONIC_QCQ_F_INITED
;
638 static int ionic_lif_rxq_init(struct ionic_lif
*lif
, struct ionic_qcq
*qcq
)
640 struct device
*dev
= lif
->ionic
->dev
;
641 struct ionic_queue
*q
= &qcq
->q
;
642 struct ionic_cq
*cq
= &qcq
->cq
;
643 struct ionic_admin_ctx ctx
= {
644 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
646 .opcode
= IONIC_CMD_Q_INIT
,
647 .lif_index
= cpu_to_le16(lif
->index
),
649 .index
= cpu_to_le32(q
->index
),
650 .flags
= cpu_to_le16(IONIC_QINIT_F_IRQ
|
652 .intr_index
= cpu_to_le16(cq
->bound_intr
->index
),
653 .pid
= cpu_to_le16(q
->pid
),
654 .ring_size
= ilog2(q
->num_descs
),
655 .ring_base
= cpu_to_le64(q
->base_pa
),
656 .cq_ring_base
= cpu_to_le64(cq
->base_pa
),
657 .sg_ring_base
= cpu_to_le64(q
->sg_base_pa
),
662 dev_dbg(dev
, "rxq_init.pid %d\n", ctx
.cmd
.q_init
.pid
);
663 dev_dbg(dev
, "rxq_init.index %d\n", ctx
.cmd
.q_init
.index
);
664 dev_dbg(dev
, "rxq_init.ring_base 0x%llx\n", ctx
.cmd
.q_init
.ring_base
);
665 dev_dbg(dev
, "rxq_init.ring_size %d\n", ctx
.cmd
.q_init
.ring_size
);
671 err
= ionic_adminq_post_wait(lif
, &ctx
);
675 q
->hw_type
= ctx
.comp
.q_init
.hw_type
;
676 q
->hw_index
= le32_to_cpu(ctx
.comp
.q_init
.hw_index
);
677 q
->dbval
= IONIC_DBELL_QID(q
->hw_index
);
679 dev_dbg(dev
, "rxq->hw_type %d\n", q
->hw_type
);
680 dev_dbg(dev
, "rxq->hw_index %d\n", q
->hw_index
);
682 netif_napi_add(lif
->netdev
, &qcq
->napi
, ionic_rx_napi
,
685 qcq
->flags
|= IONIC_QCQ_F_INITED
;
690 static bool ionic_notifyq_service(struct ionic_cq
*cq
,
691 struct ionic_cq_info
*cq_info
)
693 union ionic_notifyq_comp
*comp
= cq_info
->cq_desc
;
694 struct ionic_deferred_work
*work
;
695 struct net_device
*netdev
;
696 struct ionic_queue
*q
;
697 struct ionic_lif
*lif
;
701 lif
= q
->info
[0].cb_arg
;
702 netdev
= lif
->netdev
;
703 eid
= le64_to_cpu(comp
->event
.eid
);
705 /* Have we run out of new completions to process? */
706 if (eid
<= lif
->last_eid
)
711 dev_dbg(lif
->ionic
->dev
, "notifyq event:\n");
712 dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET
, 16, 1,
713 comp
, sizeof(*comp
), true);
715 switch (le16_to_cpu(comp
->event
.ecode
)) {
716 case IONIC_EVENT_LINK_CHANGE
:
717 ionic_link_status_check_request(lif
);
719 case IONIC_EVENT_RESET
:
720 work
= kzalloc(sizeof(*work
), GFP_ATOMIC
);
722 netdev_err(lif
->netdev
, "%s OOM\n", __func__
);
724 work
->type
= IONIC_DW_TYPE_LIF_RESET
;
725 ionic_lif_deferred_enqueue(&lif
->deferred
, work
);
729 netdev_warn(netdev
, "Notifyq unknown event ecode=%d eid=%lld\n",
730 comp
->event
.ecode
, eid
);
737 static int ionic_notifyq_clean(struct ionic_lif
*lif
, int budget
)
739 struct ionic_dev
*idev
= &lif
->ionic
->idev
;
740 struct ionic_cq
*cq
= &lif
->notifyqcq
->cq
;
743 work_done
= ionic_cq_service(cq
, budget
, ionic_notifyq_service
,
746 ionic_intr_credits(idev
->intr_ctrl
, cq
->bound_intr
->index
,
747 work_done
, IONIC_INTR_CRED_RESET_COALESCE
);
752 static bool ionic_adminq_service(struct ionic_cq
*cq
,
753 struct ionic_cq_info
*cq_info
)
755 struct ionic_admin_comp
*comp
= cq_info
->cq_desc
;
757 if (!color_match(comp
->color
, cq
->done_color
))
760 ionic_q_service(cq
->bound_q
, cq_info
, le16_to_cpu(comp
->comp_index
));
765 static int ionic_adminq_napi(struct napi_struct
*napi
, int budget
)
767 struct ionic_lif
*lif
= napi_to_cq(napi
)->lif
;
771 if (likely(lif
->notifyqcq
&& lif
->notifyqcq
->flags
& IONIC_QCQ_F_INITED
))
772 n_work
= ionic_notifyq_clean(lif
, budget
);
773 a_work
= ionic_napi(napi
, budget
, ionic_adminq_service
, NULL
, NULL
);
775 return max(n_work
, a_work
);
778 static void ionic_get_stats64(struct net_device
*netdev
,
779 struct rtnl_link_stats64
*ns
)
781 struct ionic_lif
*lif
= netdev_priv(netdev
);
782 struct ionic_lif_stats
*ls
;
784 memset(ns
, 0, sizeof(*ns
));
785 ls
= &lif
->info
->stats
;
787 ns
->rx_packets
= le64_to_cpu(ls
->rx_ucast_packets
) +
788 le64_to_cpu(ls
->rx_mcast_packets
) +
789 le64_to_cpu(ls
->rx_bcast_packets
);
791 ns
->tx_packets
= le64_to_cpu(ls
->tx_ucast_packets
) +
792 le64_to_cpu(ls
->tx_mcast_packets
) +
793 le64_to_cpu(ls
->tx_bcast_packets
);
795 ns
->rx_bytes
= le64_to_cpu(ls
->rx_ucast_bytes
) +
796 le64_to_cpu(ls
->rx_mcast_bytes
) +
797 le64_to_cpu(ls
->rx_bcast_bytes
);
799 ns
->tx_bytes
= le64_to_cpu(ls
->tx_ucast_bytes
) +
800 le64_to_cpu(ls
->tx_mcast_bytes
) +
801 le64_to_cpu(ls
->tx_bcast_bytes
);
803 ns
->rx_dropped
= le64_to_cpu(ls
->rx_ucast_drop_packets
) +
804 le64_to_cpu(ls
->rx_mcast_drop_packets
) +
805 le64_to_cpu(ls
->rx_bcast_drop_packets
);
807 ns
->tx_dropped
= le64_to_cpu(ls
->tx_ucast_drop_packets
) +
808 le64_to_cpu(ls
->tx_mcast_drop_packets
) +
809 le64_to_cpu(ls
->tx_bcast_drop_packets
);
811 ns
->multicast
= le64_to_cpu(ls
->rx_mcast_packets
);
813 ns
->rx_over_errors
= le64_to_cpu(ls
->rx_queue_empty
);
815 ns
->rx_missed_errors
= le64_to_cpu(ls
->rx_dma_error
) +
816 le64_to_cpu(ls
->rx_queue_disabled
) +
817 le64_to_cpu(ls
->rx_desc_fetch_error
) +
818 le64_to_cpu(ls
->rx_desc_data_error
);
820 ns
->tx_aborted_errors
= le64_to_cpu(ls
->tx_dma_error
) +
821 le64_to_cpu(ls
->tx_queue_disabled
) +
822 le64_to_cpu(ls
->tx_desc_fetch_error
) +
823 le64_to_cpu(ls
->tx_desc_data_error
);
825 ns
->rx_errors
= ns
->rx_over_errors
+
826 ns
->rx_missed_errors
;
828 ns
->tx_errors
= ns
->tx_aborted_errors
;
831 static int ionic_lif_addr_add(struct ionic_lif
*lif
, const u8
*addr
)
833 struct ionic_admin_ctx ctx
= {
834 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
835 .cmd
.rx_filter_add
= {
836 .opcode
= IONIC_CMD_RX_FILTER_ADD
,
837 .lif_index
= cpu_to_le16(lif
->index
),
838 .match
= cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC
),
841 struct ionic_rx_filter
*f
;
844 /* don't bother if we already have it */
845 spin_lock_bh(&lif
->rx_filters
.lock
);
846 f
= ionic_rx_filter_by_addr(lif
, addr
);
847 spin_unlock_bh(&lif
->rx_filters
.lock
);
851 netdev_dbg(lif
->netdev
, "rx_filter add ADDR %pM (id %d)\n", addr
,
852 ctx
.comp
.rx_filter_add
.filter_id
);
854 memcpy(ctx
.cmd
.rx_filter_add
.mac
.addr
, addr
, ETH_ALEN
);
855 err
= ionic_adminq_post_wait(lif
, &ctx
);
856 if (err
&& err
!= -EEXIST
)
859 return ionic_rx_filter_save(lif
, 0, IONIC_RXQ_INDEX_ANY
, 0, &ctx
);
862 static int ionic_lif_addr_del(struct ionic_lif
*lif
, const u8
*addr
)
864 struct ionic_admin_ctx ctx
= {
865 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
866 .cmd
.rx_filter_del
= {
867 .opcode
= IONIC_CMD_RX_FILTER_DEL
,
868 .lif_index
= cpu_to_le16(lif
->index
),
871 struct ionic_rx_filter
*f
;
874 spin_lock_bh(&lif
->rx_filters
.lock
);
875 f
= ionic_rx_filter_by_addr(lif
, addr
);
877 spin_unlock_bh(&lif
->rx_filters
.lock
);
881 ctx
.cmd
.rx_filter_del
.filter_id
= cpu_to_le32(f
->filter_id
);
882 ionic_rx_filter_free(lif
, f
);
883 spin_unlock_bh(&lif
->rx_filters
.lock
);
885 err
= ionic_adminq_post_wait(lif
, &ctx
);
886 if (err
&& err
!= -EEXIST
)
889 netdev_dbg(lif
->netdev
, "rx_filter del ADDR %pM (id %d)\n", addr
,
890 ctx
.cmd
.rx_filter_del
.filter_id
);
895 static int ionic_lif_addr(struct ionic_lif
*lif
, const u8
*addr
, bool add
)
897 struct ionic
*ionic
= lif
->ionic
;
898 struct ionic_deferred_work
*work
;
899 unsigned int nmfilters
;
900 unsigned int nufilters
;
903 /* Do we have space for this filter? We test the counters
904 * here before checking the need for deferral so that we
905 * can return an overflow error to the stack.
907 nmfilters
= le32_to_cpu(ionic
->ident
.lif
.eth
.max_mcast_filters
);
908 nufilters
= le32_to_cpu(ionic
->ident
.lif
.eth
.max_ucast_filters
);
910 if ((is_multicast_ether_addr(addr
) && lif
->nmcast
< nmfilters
))
912 else if (!is_multicast_ether_addr(addr
) &&
913 lif
->nucast
< nufilters
)
918 if (is_multicast_ether_addr(addr
) && lif
->nmcast
)
920 else if (!is_multicast_ether_addr(addr
) && lif
->nucast
)
924 if (in_interrupt()) {
925 work
= kzalloc(sizeof(*work
), GFP_ATOMIC
);
927 netdev_err(lif
->netdev
, "%s OOM\n", __func__
);
930 work
->type
= add
? IONIC_DW_TYPE_RX_ADDR_ADD
:
931 IONIC_DW_TYPE_RX_ADDR_DEL
;
932 memcpy(work
->addr
, addr
, ETH_ALEN
);
933 netdev_dbg(lif
->netdev
, "deferred: rx_filter %s %pM\n",
934 add
? "add" : "del", addr
);
935 ionic_lif_deferred_enqueue(&lif
->deferred
, work
);
937 netdev_dbg(lif
->netdev
, "rx_filter %s %pM\n",
938 add
? "add" : "del", addr
);
940 return ionic_lif_addr_add(lif
, addr
);
942 return ionic_lif_addr_del(lif
, addr
);
948 static int ionic_addr_add(struct net_device
*netdev
, const u8
*addr
)
950 return ionic_lif_addr(netdev_priv(netdev
), addr
, true);
953 static int ionic_addr_del(struct net_device
*netdev
, const u8
*addr
)
955 return ionic_lif_addr(netdev_priv(netdev
), addr
, false);
958 static void ionic_lif_rx_mode(struct ionic_lif
*lif
, unsigned int rx_mode
)
960 struct ionic_admin_ctx ctx
= {
961 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
963 .opcode
= IONIC_CMD_RX_MODE_SET
,
964 .lif_index
= cpu_to_le16(lif
->index
),
965 .rx_mode
= cpu_to_le16(rx_mode
),
971 #define REMAIN(__x) (sizeof(buf) - (__x))
973 i
= scnprintf(buf
, sizeof(buf
), "rx_mode 0x%04x -> 0x%04x:",
974 lif
->rx_mode
, rx_mode
);
975 if (rx_mode
& IONIC_RX_MODE_F_UNICAST
)
976 i
+= scnprintf(&buf
[i
], REMAIN(i
), " RX_MODE_F_UNICAST");
977 if (rx_mode
& IONIC_RX_MODE_F_MULTICAST
)
978 i
+= scnprintf(&buf
[i
], REMAIN(i
), " RX_MODE_F_MULTICAST");
979 if (rx_mode
& IONIC_RX_MODE_F_BROADCAST
)
980 i
+= scnprintf(&buf
[i
], REMAIN(i
), " RX_MODE_F_BROADCAST");
981 if (rx_mode
& IONIC_RX_MODE_F_PROMISC
)
982 i
+= scnprintf(&buf
[i
], REMAIN(i
), " RX_MODE_F_PROMISC");
983 if (rx_mode
& IONIC_RX_MODE_F_ALLMULTI
)
984 i
+= scnprintf(&buf
[i
], REMAIN(i
), " RX_MODE_F_ALLMULTI");
985 netdev_dbg(lif
->netdev
, "lif%d %s\n", lif
->index
, buf
);
987 err
= ionic_adminq_post_wait(lif
, &ctx
);
989 netdev_warn(lif
->netdev
, "set rx_mode 0x%04x failed: %d\n",
992 lif
->rx_mode
= rx_mode
;
995 static void _ionic_lif_rx_mode(struct ionic_lif
*lif
, unsigned int rx_mode
)
997 struct ionic_deferred_work
*work
;
999 if (in_interrupt()) {
1000 work
= kzalloc(sizeof(*work
), GFP_ATOMIC
);
1002 netdev_err(lif
->netdev
, "%s OOM\n", __func__
);
1005 work
->type
= IONIC_DW_TYPE_RX_MODE
;
1006 work
->rx_mode
= rx_mode
;
1007 netdev_dbg(lif
->netdev
, "deferred: rx_mode\n");
1008 ionic_lif_deferred_enqueue(&lif
->deferred
, work
);
1010 ionic_lif_rx_mode(lif
, rx_mode
);
1014 static void ionic_set_rx_mode(struct net_device
*netdev
)
1016 struct ionic_lif
*lif
= netdev_priv(netdev
);
1017 struct ionic_identity
*ident
;
1018 unsigned int nfilters
;
1019 unsigned int rx_mode
;
1021 ident
= &lif
->ionic
->ident
;
1023 rx_mode
= IONIC_RX_MODE_F_UNICAST
;
1024 rx_mode
|= (netdev
->flags
& IFF_MULTICAST
) ? IONIC_RX_MODE_F_MULTICAST
: 0;
1025 rx_mode
|= (netdev
->flags
& IFF_BROADCAST
) ? IONIC_RX_MODE_F_BROADCAST
: 0;
1026 rx_mode
|= (netdev
->flags
& IFF_PROMISC
) ? IONIC_RX_MODE_F_PROMISC
: 0;
1027 rx_mode
|= (netdev
->flags
& IFF_ALLMULTI
) ? IONIC_RX_MODE_F_ALLMULTI
: 0;
1029 /* sync unicast addresses
1030 * next check to see if we're in an overflow state
1031 * if so, we track that we overflowed and enable NIC PROMISC
1032 * else if the overflow is set and not needed
1033 * we remove our overflow flag and check the netdev flags
1034 * to see if we can disable NIC PROMISC
1036 __dev_uc_sync(netdev
, ionic_addr_add
, ionic_addr_del
);
1037 nfilters
= le32_to_cpu(ident
->lif
.eth
.max_ucast_filters
);
1038 if (netdev_uc_count(netdev
) + 1 > nfilters
) {
1039 rx_mode
|= IONIC_RX_MODE_F_PROMISC
;
1040 lif
->uc_overflow
= true;
1041 } else if (lif
->uc_overflow
) {
1042 lif
->uc_overflow
= false;
1043 if (!(netdev
->flags
& IFF_PROMISC
))
1044 rx_mode
&= ~IONIC_RX_MODE_F_PROMISC
;
1047 /* same for multicast */
1048 __dev_mc_sync(netdev
, ionic_addr_add
, ionic_addr_del
);
1049 nfilters
= le32_to_cpu(ident
->lif
.eth
.max_mcast_filters
);
1050 if (netdev_mc_count(netdev
) > nfilters
) {
1051 rx_mode
|= IONIC_RX_MODE_F_ALLMULTI
;
1052 lif
->mc_overflow
= true;
1053 } else if (lif
->mc_overflow
) {
1054 lif
->mc_overflow
= false;
1055 if (!(netdev
->flags
& IFF_ALLMULTI
))
1056 rx_mode
&= ~IONIC_RX_MODE_F_ALLMULTI
;
1059 if (lif
->rx_mode
!= rx_mode
)
1060 _ionic_lif_rx_mode(lif
, rx_mode
);
1063 static __le64
ionic_netdev_features_to_nic(netdev_features_t features
)
1067 if (features
& NETIF_F_HW_VLAN_CTAG_TX
)
1068 wanted
|= IONIC_ETH_HW_VLAN_TX_TAG
;
1069 if (features
& NETIF_F_HW_VLAN_CTAG_RX
)
1070 wanted
|= IONIC_ETH_HW_VLAN_RX_STRIP
;
1071 if (features
& NETIF_F_HW_VLAN_CTAG_FILTER
)
1072 wanted
|= IONIC_ETH_HW_VLAN_RX_FILTER
;
1073 if (features
& NETIF_F_RXHASH
)
1074 wanted
|= IONIC_ETH_HW_RX_HASH
;
1075 if (features
& NETIF_F_RXCSUM
)
1076 wanted
|= IONIC_ETH_HW_RX_CSUM
;
1077 if (features
& NETIF_F_SG
)
1078 wanted
|= IONIC_ETH_HW_TX_SG
;
1079 if (features
& NETIF_F_HW_CSUM
)
1080 wanted
|= IONIC_ETH_HW_TX_CSUM
;
1081 if (features
& NETIF_F_TSO
)
1082 wanted
|= IONIC_ETH_HW_TSO
;
1083 if (features
& NETIF_F_TSO6
)
1084 wanted
|= IONIC_ETH_HW_TSO_IPV6
;
1085 if (features
& NETIF_F_TSO_ECN
)
1086 wanted
|= IONIC_ETH_HW_TSO_ECN
;
1087 if (features
& NETIF_F_GSO_GRE
)
1088 wanted
|= IONIC_ETH_HW_TSO_GRE
;
1089 if (features
& NETIF_F_GSO_GRE_CSUM
)
1090 wanted
|= IONIC_ETH_HW_TSO_GRE_CSUM
;
1091 if (features
& NETIF_F_GSO_IPXIP4
)
1092 wanted
|= IONIC_ETH_HW_TSO_IPXIP4
;
1093 if (features
& NETIF_F_GSO_IPXIP6
)
1094 wanted
|= IONIC_ETH_HW_TSO_IPXIP6
;
1095 if (features
& NETIF_F_GSO_UDP_TUNNEL
)
1096 wanted
|= IONIC_ETH_HW_TSO_UDP
;
1097 if (features
& NETIF_F_GSO_UDP_TUNNEL_CSUM
)
1098 wanted
|= IONIC_ETH_HW_TSO_UDP_CSUM
;
1100 return cpu_to_le64(wanted
);
1103 static int ionic_set_nic_features(struct ionic_lif
*lif
,
1104 netdev_features_t features
)
1106 struct device
*dev
= lif
->ionic
->dev
;
1107 struct ionic_admin_ctx ctx
= {
1108 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
1109 .cmd
.lif_setattr
= {
1110 .opcode
= IONIC_CMD_LIF_SETATTR
,
1111 .index
= cpu_to_le16(lif
->index
),
1112 .attr
= IONIC_LIF_ATTR_FEATURES
,
1115 u64 vlan_flags
= IONIC_ETH_HW_VLAN_TX_TAG
|
1116 IONIC_ETH_HW_VLAN_RX_STRIP
|
1117 IONIC_ETH_HW_VLAN_RX_FILTER
;
1118 u64 old_hw_features
;
1121 ctx
.cmd
.lif_setattr
.features
= ionic_netdev_features_to_nic(features
);
1122 err
= ionic_adminq_post_wait(lif
, &ctx
);
1126 old_hw_features
= lif
->hw_features
;
1127 lif
->hw_features
= le64_to_cpu(ctx
.cmd
.lif_setattr
.features
&
1128 ctx
.comp
.lif_setattr
.features
);
1130 if ((old_hw_features
^ lif
->hw_features
) & IONIC_ETH_HW_RX_HASH
)
1131 ionic_lif_rss_config(lif
, lif
->rss_types
, NULL
, NULL
);
1133 if ((vlan_flags
& features
) &&
1134 !(vlan_flags
& le64_to_cpu(ctx
.comp
.lif_setattr
.features
)))
1135 dev_info_once(lif
->ionic
->dev
, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");
1137 if (lif
->hw_features
& IONIC_ETH_HW_VLAN_TX_TAG
)
1138 dev_dbg(dev
, "feature ETH_HW_VLAN_TX_TAG\n");
1139 if (lif
->hw_features
& IONIC_ETH_HW_VLAN_RX_STRIP
)
1140 dev_dbg(dev
, "feature ETH_HW_VLAN_RX_STRIP\n");
1141 if (lif
->hw_features
& IONIC_ETH_HW_VLAN_RX_FILTER
)
1142 dev_dbg(dev
, "feature ETH_HW_VLAN_RX_FILTER\n");
1143 if (lif
->hw_features
& IONIC_ETH_HW_RX_HASH
)
1144 dev_dbg(dev
, "feature ETH_HW_RX_HASH\n");
1145 if (lif
->hw_features
& IONIC_ETH_HW_TX_SG
)
1146 dev_dbg(dev
, "feature ETH_HW_TX_SG\n");
1147 if (lif
->hw_features
& IONIC_ETH_HW_TX_CSUM
)
1148 dev_dbg(dev
, "feature ETH_HW_TX_CSUM\n");
1149 if (lif
->hw_features
& IONIC_ETH_HW_RX_CSUM
)
1150 dev_dbg(dev
, "feature ETH_HW_RX_CSUM\n");
1151 if (lif
->hw_features
& IONIC_ETH_HW_TSO
)
1152 dev_dbg(dev
, "feature ETH_HW_TSO\n");
1153 if (lif
->hw_features
& IONIC_ETH_HW_TSO_IPV6
)
1154 dev_dbg(dev
, "feature ETH_HW_TSO_IPV6\n");
1155 if (lif
->hw_features
& IONIC_ETH_HW_TSO_ECN
)
1156 dev_dbg(dev
, "feature ETH_HW_TSO_ECN\n");
1157 if (lif
->hw_features
& IONIC_ETH_HW_TSO_GRE
)
1158 dev_dbg(dev
, "feature ETH_HW_TSO_GRE\n");
1159 if (lif
->hw_features
& IONIC_ETH_HW_TSO_GRE_CSUM
)
1160 dev_dbg(dev
, "feature ETH_HW_TSO_GRE_CSUM\n");
1161 if (lif
->hw_features
& IONIC_ETH_HW_TSO_IPXIP4
)
1162 dev_dbg(dev
, "feature ETH_HW_TSO_IPXIP4\n");
1163 if (lif
->hw_features
& IONIC_ETH_HW_TSO_IPXIP6
)
1164 dev_dbg(dev
, "feature ETH_HW_TSO_IPXIP6\n");
1165 if (lif
->hw_features
& IONIC_ETH_HW_TSO_UDP
)
1166 dev_dbg(dev
, "feature ETH_HW_TSO_UDP\n");
1167 if (lif
->hw_features
& IONIC_ETH_HW_TSO_UDP_CSUM
)
1168 dev_dbg(dev
, "feature ETH_HW_TSO_UDP_CSUM\n");
1173 static int ionic_init_nic_features(struct ionic_lif
*lif
)
1175 struct net_device
*netdev
= lif
->netdev
;
1176 netdev_features_t features
;
1179 /* no netdev features on the management device */
1180 if (lif
->ionic
->is_mgmt_nic
)
1183 /* set up what we expect to support by default */
1184 features
= NETIF_F_HW_VLAN_CTAG_TX
|
1185 NETIF_F_HW_VLAN_CTAG_RX
|
1186 NETIF_F_HW_VLAN_CTAG_FILTER
|
1195 err
= ionic_set_nic_features(lif
, features
);
1199 /* tell the netdev what we actually can support */
1200 netdev
->features
|= NETIF_F_HIGHDMA
;
1202 if (lif
->hw_features
& IONIC_ETH_HW_VLAN_TX_TAG
)
1203 netdev
->hw_features
|= NETIF_F_HW_VLAN_CTAG_TX
;
1204 if (lif
->hw_features
& IONIC_ETH_HW_VLAN_RX_STRIP
)
1205 netdev
->hw_features
|= NETIF_F_HW_VLAN_CTAG_RX
;
1206 if (lif
->hw_features
& IONIC_ETH_HW_VLAN_RX_FILTER
)
1207 netdev
->hw_features
|= NETIF_F_HW_VLAN_CTAG_FILTER
;
1208 if (lif
->hw_features
& IONIC_ETH_HW_RX_HASH
)
1209 netdev
->hw_features
|= NETIF_F_RXHASH
;
1210 if (lif
->hw_features
& IONIC_ETH_HW_TX_SG
)
1211 netdev
->hw_features
|= NETIF_F_SG
;
1213 if (lif
->hw_features
& IONIC_ETH_HW_TX_CSUM
)
1214 netdev
->hw_enc_features
|= NETIF_F_HW_CSUM
;
1215 if (lif
->hw_features
& IONIC_ETH_HW_RX_CSUM
)
1216 netdev
->hw_enc_features
|= NETIF_F_RXCSUM
;
1217 if (lif
->hw_features
& IONIC_ETH_HW_TSO
)
1218 netdev
->hw_enc_features
|= NETIF_F_TSO
;
1219 if (lif
->hw_features
& IONIC_ETH_HW_TSO_IPV6
)
1220 netdev
->hw_enc_features
|= NETIF_F_TSO6
;
1221 if (lif
->hw_features
& IONIC_ETH_HW_TSO_ECN
)
1222 netdev
->hw_enc_features
|= NETIF_F_TSO_ECN
;
1223 if (lif
->hw_features
& IONIC_ETH_HW_TSO_GRE
)
1224 netdev
->hw_enc_features
|= NETIF_F_GSO_GRE
;
1225 if (lif
->hw_features
& IONIC_ETH_HW_TSO_GRE_CSUM
)
1226 netdev
->hw_enc_features
|= NETIF_F_GSO_GRE_CSUM
;
1227 if (lif
->hw_features
& IONIC_ETH_HW_TSO_IPXIP4
)
1228 netdev
->hw_enc_features
|= NETIF_F_GSO_IPXIP4
;
1229 if (lif
->hw_features
& IONIC_ETH_HW_TSO_IPXIP6
)
1230 netdev
->hw_enc_features
|= NETIF_F_GSO_IPXIP6
;
1231 if (lif
->hw_features
& IONIC_ETH_HW_TSO_UDP
)
1232 netdev
->hw_enc_features
|= NETIF_F_GSO_UDP_TUNNEL
;
1233 if (lif
->hw_features
& IONIC_ETH_HW_TSO_UDP_CSUM
)
1234 netdev
->hw_enc_features
|= NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1236 netdev
->hw_features
|= netdev
->hw_enc_features
;
1237 netdev
->features
|= netdev
->hw_features
;
1239 netdev
->priv_flags
|= IFF_UNICAST_FLT
|
1240 IFF_LIVE_ADDR_CHANGE
;
1245 static int ionic_set_features(struct net_device
*netdev
,
1246 netdev_features_t features
)
1248 struct ionic_lif
*lif
= netdev_priv(netdev
);
1251 netdev_dbg(netdev
, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
1252 __func__
, (u64
)lif
->netdev
->features
, (u64
)features
);
1254 err
= ionic_set_nic_features(lif
, features
);
1259 static int ionic_set_mac_address(struct net_device
*netdev
, void *sa
)
1261 struct sockaddr
*addr
= sa
;
1265 mac
= (u8
*)addr
->sa_data
;
1266 if (ether_addr_equal(netdev
->dev_addr
, mac
))
1269 err
= eth_prepare_mac_addr_change(netdev
, addr
);
1273 if (!is_zero_ether_addr(netdev
->dev_addr
)) {
1274 netdev_info(netdev
, "deleting mac addr %pM\n",
1276 ionic_addr_del(netdev
, netdev
->dev_addr
);
1279 eth_commit_mac_addr_change(netdev
, addr
);
1280 netdev_info(netdev
, "updating mac addr %pM\n", mac
);
1282 return ionic_addr_add(netdev
, mac
);
1285 static int ionic_change_mtu(struct net_device
*netdev
, int new_mtu
)
1287 struct ionic_lif
*lif
= netdev_priv(netdev
);
1288 struct ionic_admin_ctx ctx
= {
1289 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
1290 .cmd
.lif_setattr
= {
1291 .opcode
= IONIC_CMD_LIF_SETATTR
,
1292 .index
= cpu_to_le16(lif
->index
),
1293 .attr
= IONIC_LIF_ATTR_MTU
,
1294 .mtu
= cpu_to_le32(new_mtu
),
1299 err
= ionic_adminq_post_wait(lif
, &ctx
);
1303 netdev
->mtu
= new_mtu
;
1304 err
= ionic_reset_queues(lif
);
1309 static void ionic_tx_timeout_work(struct work_struct
*ws
)
1311 struct ionic_lif
*lif
= container_of(ws
, struct ionic_lif
, tx_timeout_work
);
1313 netdev_info(lif
->netdev
, "Tx Timeout recovery\n");
1316 ionic_reset_queues(lif
);
1320 static void ionic_tx_timeout(struct net_device
*netdev
, unsigned int txqueue
)
1322 struct ionic_lif
*lif
= netdev_priv(netdev
);
1324 schedule_work(&lif
->tx_timeout_work
);
1327 static int ionic_vlan_rx_add_vid(struct net_device
*netdev
, __be16 proto
,
1330 struct ionic_lif
*lif
= netdev_priv(netdev
);
1331 struct ionic_admin_ctx ctx
= {
1332 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
1333 .cmd
.rx_filter_add
= {
1334 .opcode
= IONIC_CMD_RX_FILTER_ADD
,
1335 .lif_index
= cpu_to_le16(lif
->index
),
1336 .match
= cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN
),
1337 .vlan
.vlan
= cpu_to_le16(vid
),
1342 err
= ionic_adminq_post_wait(lif
, &ctx
);
1346 netdev_dbg(netdev
, "rx_filter add VLAN %d (id %d)\n", vid
,
1347 ctx
.comp
.rx_filter_add
.filter_id
);
1349 return ionic_rx_filter_save(lif
, 0, IONIC_RXQ_INDEX_ANY
, 0, &ctx
);
1352 static int ionic_vlan_rx_kill_vid(struct net_device
*netdev
, __be16 proto
,
1355 struct ionic_lif
*lif
= netdev_priv(netdev
);
1356 struct ionic_admin_ctx ctx
= {
1357 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
1358 .cmd
.rx_filter_del
= {
1359 .opcode
= IONIC_CMD_RX_FILTER_DEL
,
1360 .lif_index
= cpu_to_le16(lif
->index
),
1363 struct ionic_rx_filter
*f
;
1365 spin_lock_bh(&lif
->rx_filters
.lock
);
1367 f
= ionic_rx_filter_by_vlan(lif
, vid
);
1369 spin_unlock_bh(&lif
->rx_filters
.lock
);
1373 netdev_dbg(netdev
, "rx_filter del VLAN %d (id %d)\n", vid
,
1374 le32_to_cpu(ctx
.cmd
.rx_filter_del
.filter_id
));
1376 ctx
.cmd
.rx_filter_del
.filter_id
= cpu_to_le32(f
->filter_id
);
1377 ionic_rx_filter_free(lif
, f
);
1378 spin_unlock_bh(&lif
->rx_filters
.lock
);
1380 return ionic_adminq_post_wait(lif
, &ctx
);
1383 int ionic_lif_rss_config(struct ionic_lif
*lif
, const u16 types
,
1384 const u8
*key
, const u32
*indir
)
1386 struct ionic_admin_ctx ctx
= {
1387 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
1388 .cmd
.lif_setattr
= {
1389 .opcode
= IONIC_CMD_LIF_SETATTR
,
1390 .attr
= IONIC_LIF_ATTR_RSS
,
1391 .rss
.addr
= cpu_to_le64(lif
->rss_ind_tbl_pa
),
1394 unsigned int i
, tbl_sz
;
1396 if (lif
->hw_features
& IONIC_ETH_HW_RX_HASH
) {
1397 lif
->rss_types
= types
;
1398 ctx
.cmd
.lif_setattr
.rss
.types
= cpu_to_le16(types
);
1402 memcpy(lif
->rss_hash_key
, key
, IONIC_RSS_HASH_KEY_SIZE
);
1405 tbl_sz
= le16_to_cpu(lif
->ionic
->ident
.lif
.eth
.rss_ind_tbl_sz
);
1406 for (i
= 0; i
< tbl_sz
; i
++)
1407 lif
->rss_ind_tbl
[i
] = indir
[i
];
1410 memcpy(ctx
.cmd
.lif_setattr
.rss
.key
, lif
->rss_hash_key
,
1411 IONIC_RSS_HASH_KEY_SIZE
);
1413 return ionic_adminq_post_wait(lif
, &ctx
);
1416 static int ionic_lif_rss_init(struct ionic_lif
*lif
)
1418 unsigned int tbl_sz
;
1421 lif
->rss_types
= IONIC_RSS_TYPE_IPV4
|
1422 IONIC_RSS_TYPE_IPV4_TCP
|
1423 IONIC_RSS_TYPE_IPV4_UDP
|
1424 IONIC_RSS_TYPE_IPV6
|
1425 IONIC_RSS_TYPE_IPV6_TCP
|
1426 IONIC_RSS_TYPE_IPV6_UDP
;
1428 /* Fill indirection table with 'default' values */
1429 tbl_sz
= le16_to_cpu(lif
->ionic
->ident
.lif
.eth
.rss_ind_tbl_sz
);
1430 for (i
= 0; i
< tbl_sz
; i
++)
1431 lif
->rss_ind_tbl
[i
] = ethtool_rxfh_indir_default(i
, lif
->nxqs
);
1433 return ionic_lif_rss_config(lif
, lif
->rss_types
, NULL
, NULL
);
1436 static void ionic_lif_rss_deinit(struct ionic_lif
*lif
)
1440 tbl_sz
= le16_to_cpu(lif
->ionic
->ident
.lif
.eth
.rss_ind_tbl_sz
);
1441 memset(lif
->rss_ind_tbl
, 0, tbl_sz
);
1442 memset(lif
->rss_hash_key
, 0, IONIC_RSS_HASH_KEY_SIZE
);
1444 ionic_lif_rss_config(lif
, 0x0, NULL
, NULL
);
1447 static void ionic_txrx_disable(struct ionic_lif
*lif
)
1453 for (i
= 0; i
< lif
->nxqs
; i
++) {
1454 err
= ionic_qcq_disable(lif
->txqcqs
[i
].qcq
);
1455 if (err
== -ETIMEDOUT
)
1461 for (i
= 0; i
< lif
->nxqs
; i
++) {
1462 err
= ionic_qcq_disable(lif
->rxqcqs
[i
].qcq
);
1463 if (err
== -ETIMEDOUT
)
1469 static void ionic_txrx_deinit(struct ionic_lif
*lif
)
1474 for (i
= 0; i
< lif
->nxqs
; i
++) {
1475 ionic_lif_qcq_deinit(lif
, lif
->txqcqs
[i
].qcq
);
1476 ionic_tx_flush(&lif
->txqcqs
[i
].qcq
->cq
);
1477 ionic_tx_empty(&lif
->txqcqs
[i
].qcq
->q
);
1482 for (i
= 0; i
< lif
->nxqs
; i
++) {
1483 ionic_lif_qcq_deinit(lif
, lif
->rxqcqs
[i
].qcq
);
1484 ionic_rx_flush(&lif
->rxqcqs
[i
].qcq
->cq
);
1485 ionic_rx_empty(&lif
->rxqcqs
[i
].qcq
->q
);
1491 static void ionic_txrx_free(struct ionic_lif
*lif
)
1496 for (i
= 0; i
< lif
->nxqs
; i
++) {
1497 ionic_qcq_free(lif
, lif
->txqcqs
[i
].qcq
);
1498 lif
->txqcqs
[i
].qcq
= NULL
;
1503 for (i
= 0; i
< lif
->nxqs
; i
++) {
1504 ionic_qcq_free(lif
, lif
->rxqcqs
[i
].qcq
);
1505 lif
->rxqcqs
[i
].qcq
= NULL
;
1510 static int ionic_txrx_alloc(struct ionic_lif
*lif
)
1516 flags
= IONIC_QCQ_F_TX_STATS
| IONIC_QCQ_F_SG
;
1517 for (i
= 0; i
< lif
->nxqs
; i
++) {
1518 err
= ionic_qcq_alloc(lif
, IONIC_QTYPE_TXQ
, i
, "tx", flags
,
1520 sizeof(struct ionic_txq_desc
),
1521 sizeof(struct ionic_txq_comp
),
1522 sizeof(struct ionic_txq_sg_desc
),
1523 lif
->kern_pid
, &lif
->txqcqs
[i
].qcq
);
1527 lif
->txqcqs
[i
].qcq
->stats
= lif
->txqcqs
[i
].stats
;
1528 ionic_debugfs_add_qcq(lif
, lif
->txqcqs
[i
].qcq
);
1531 flags
= IONIC_QCQ_F_RX_STATS
| IONIC_QCQ_F_SG
| IONIC_QCQ_F_INTR
;
1532 for (i
= 0; i
< lif
->nxqs
; i
++) {
1533 err
= ionic_qcq_alloc(lif
, IONIC_QTYPE_RXQ
, i
, "rx", flags
,
1535 sizeof(struct ionic_rxq_desc
),
1536 sizeof(struct ionic_rxq_comp
),
1537 sizeof(struct ionic_rxq_sg_desc
),
1538 lif
->kern_pid
, &lif
->rxqcqs
[i
].qcq
);
1542 lif
->rxqcqs
[i
].qcq
->stats
= lif
->rxqcqs
[i
].stats
;
1544 ionic_intr_coal_init(lif
->ionic
->idev
.intr_ctrl
,
1545 lif
->rxqcqs
[i
].qcq
->intr
.index
,
1546 lif
->rx_coalesce_hw
);
1547 ionic_link_qcq_interrupts(lif
->rxqcqs
[i
].qcq
,
1548 lif
->txqcqs
[i
].qcq
);
1549 ionic_debugfs_add_qcq(lif
, lif
->rxqcqs
[i
].qcq
);
1555 ionic_txrx_free(lif
);
1560 static int ionic_txrx_init(struct ionic_lif
*lif
)
1565 for (i
= 0; i
< lif
->nxqs
; i
++) {
1566 err
= ionic_lif_txq_init(lif
, lif
->txqcqs
[i
].qcq
);
1570 err
= ionic_lif_rxq_init(lif
, lif
->rxqcqs
[i
].qcq
);
1572 ionic_lif_qcq_deinit(lif
, lif
->txqcqs
[i
].qcq
);
1577 if (lif
->netdev
->features
& NETIF_F_RXHASH
)
1578 ionic_lif_rss_init(lif
);
1580 ionic_set_rx_mode(lif
->netdev
);
1586 ionic_lif_qcq_deinit(lif
, lif
->txqcqs
[i
].qcq
);
1587 ionic_lif_qcq_deinit(lif
, lif
->rxqcqs
[i
].qcq
);
1593 static int ionic_txrx_enable(struct ionic_lif
*lif
)
1597 for (i
= 0; i
< lif
->nxqs
; i
++) {
1598 ionic_rx_fill(&lif
->rxqcqs
[i
].qcq
->q
);
1599 err
= ionic_qcq_enable(lif
->rxqcqs
[i
].qcq
);
1603 err
= ionic_qcq_enable(lif
->txqcqs
[i
].qcq
);
1605 if (err
!= -ETIMEDOUT
)
1606 ionic_qcq_disable(lif
->rxqcqs
[i
].qcq
);
1615 err
= ionic_qcq_disable(lif
->txqcqs
[i
].qcq
);
1616 if (err
== -ETIMEDOUT
)
1618 err
= ionic_qcq_disable(lif
->rxqcqs
[i
].qcq
);
1619 if (err
== -ETIMEDOUT
)
1626 static int ionic_start_queues(struct ionic_lif
*lif
)
1630 if (test_and_set_bit(IONIC_LIF_F_UP
, lif
->state
))
1633 err
= ionic_txrx_enable(lif
);
1635 clear_bit(IONIC_LIF_F_UP
, lif
->state
);
1638 netif_tx_wake_all_queues(lif
->netdev
);
/* ndo_open: allocate and initialize the queues; queues are only started
 * once link is reported up.
 */
int ionic_open(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	err = ionic_txrx_alloc(lif);
	if (err)
		return err;

	err = ionic_txrx_init(lif);
	if (err)
		goto err_txrx_free;

	/* don't start the queues until we have link */
	if (netif_carrier_ok(netdev)) {
		err = ionic_start_queues(lif);
		if (err)
			goto err_txrx_deinit;
	}

	return 0;

err_txrx_deinit:
	ionic_txrx_deinit(lif);
err_txrx_free:
	ionic_txrx_free(lif);
	return err;
}
1672 static void ionic_stop_queues(struct ionic_lif
*lif
)
1674 if (!test_and_clear_bit(IONIC_LIF_F_UP
, lif
->state
))
1677 ionic_txrx_disable(lif
);
1678 netif_tx_disable(lif
->netdev
);
1681 int ionic_stop(struct net_device
*netdev
)
1683 struct ionic_lif
*lif
= netdev_priv(netdev
);
1685 if (test_bit(IONIC_LIF_F_FW_RESET
, lif
->state
))
1688 ionic_stop_queues(lif
);
1689 ionic_txrx_deinit(lif
);
1690 ionic_txrx_free(lif
);
1695 static int ionic_get_vf_config(struct net_device
*netdev
,
1696 int vf
, struct ifla_vf_info
*ivf
)
1698 struct ionic_lif
*lif
= netdev_priv(netdev
);
1699 struct ionic
*ionic
= lif
->ionic
;
1702 down_read(&ionic
->vf_op_lock
);
1704 if (vf
>= pci_num_vf(ionic
->pdev
) || !ionic
->vfs
) {
1708 ivf
->vlan
= ionic
->vfs
[vf
].vlanid
;
1710 ivf
->spoofchk
= ionic
->vfs
[vf
].spoofchk
;
1711 ivf
->linkstate
= ionic
->vfs
[vf
].linkstate
;
1712 ivf
->max_tx_rate
= ionic
->vfs
[vf
].maxrate
;
1713 ivf
->trusted
= ionic
->vfs
[vf
].trusted
;
1714 ether_addr_copy(ivf
->mac
, ionic
->vfs
[vf
].macaddr
);
1717 up_read(&ionic
->vf_op_lock
);
1721 static int ionic_get_vf_stats(struct net_device
*netdev
, int vf
,
1722 struct ifla_vf_stats
*vf_stats
)
1724 struct ionic_lif
*lif
= netdev_priv(netdev
);
1725 struct ionic
*ionic
= lif
->ionic
;
1726 struct ionic_lif_stats
*vs
;
1729 down_read(&ionic
->vf_op_lock
);
1731 if (vf
>= pci_num_vf(ionic
->pdev
) || !ionic
->vfs
) {
1734 memset(vf_stats
, 0, sizeof(*vf_stats
));
1735 vs
= &ionic
->vfs
[vf
].stats
;
1737 vf_stats
->rx_packets
= le64_to_cpu(vs
->rx_ucast_packets
);
1738 vf_stats
->tx_packets
= le64_to_cpu(vs
->tx_ucast_packets
);
1739 vf_stats
->rx_bytes
= le64_to_cpu(vs
->rx_ucast_bytes
);
1740 vf_stats
->tx_bytes
= le64_to_cpu(vs
->tx_ucast_bytes
);
1741 vf_stats
->broadcast
= le64_to_cpu(vs
->rx_bcast_packets
);
1742 vf_stats
->multicast
= le64_to_cpu(vs
->rx_mcast_packets
);
1743 vf_stats
->rx_dropped
= le64_to_cpu(vs
->rx_ucast_drop_packets
) +
1744 le64_to_cpu(vs
->rx_mcast_drop_packets
) +
1745 le64_to_cpu(vs
->rx_bcast_drop_packets
);
1746 vf_stats
->tx_dropped
= le64_to_cpu(vs
->tx_ucast_drop_packets
) +
1747 le64_to_cpu(vs
->tx_mcast_drop_packets
) +
1748 le64_to_cpu(vs
->tx_bcast_drop_packets
);
1751 up_read(&ionic
->vf_op_lock
);
1755 static int ionic_set_vf_mac(struct net_device
*netdev
, int vf
, u8
*mac
)
1757 struct ionic_lif
*lif
= netdev_priv(netdev
);
1758 struct ionic
*ionic
= lif
->ionic
;
1761 if (!(is_zero_ether_addr(mac
) || is_valid_ether_addr(mac
)))
1764 down_write(&ionic
->vf_op_lock
);
1766 if (vf
>= pci_num_vf(ionic
->pdev
) || !ionic
->vfs
) {
1769 ret
= ionic_set_vf_config(ionic
, vf
, IONIC_VF_ATTR_MAC
, mac
);
1771 ether_addr_copy(ionic
->vfs
[vf
].macaddr
, mac
);
1774 up_write(&ionic
->vf_op_lock
);
1778 static int ionic_set_vf_vlan(struct net_device
*netdev
, int vf
, u16 vlan
,
1779 u8 qos
, __be16 proto
)
1781 struct ionic_lif
*lif
= netdev_priv(netdev
);
1782 struct ionic
*ionic
= lif
->ionic
;
1785 /* until someday when we support qos */
1792 if (proto
!= htons(ETH_P_8021Q
))
1793 return -EPROTONOSUPPORT
;
1795 down_write(&ionic
->vf_op_lock
);
1797 if (vf
>= pci_num_vf(ionic
->pdev
) || !ionic
->vfs
) {
1800 ret
= ionic_set_vf_config(ionic
, vf
,
1801 IONIC_VF_ATTR_VLAN
, (u8
*)&vlan
);
1803 ionic
->vfs
[vf
].vlanid
= vlan
;
1806 up_write(&ionic
->vf_op_lock
);
1810 static int ionic_set_vf_rate(struct net_device
*netdev
, int vf
,
1811 int tx_min
, int tx_max
)
1813 struct ionic_lif
*lif
= netdev_priv(netdev
);
1814 struct ionic
*ionic
= lif
->ionic
;
1817 /* setting the min just seems silly */
1821 down_write(&ionic
->vf_op_lock
);
1823 if (vf
>= pci_num_vf(ionic
->pdev
) || !ionic
->vfs
) {
1826 ret
= ionic_set_vf_config(ionic
, vf
,
1827 IONIC_VF_ATTR_RATE
, (u8
*)&tx_max
);
1829 lif
->ionic
->vfs
[vf
].maxrate
= tx_max
;
1832 up_write(&ionic
->vf_op_lock
);
1836 static int ionic_set_vf_spoofchk(struct net_device
*netdev
, int vf
, bool set
)
1838 struct ionic_lif
*lif
= netdev_priv(netdev
);
1839 struct ionic
*ionic
= lif
->ionic
;
1840 u8 data
= set
; /* convert to u8 for config */
1843 down_write(&ionic
->vf_op_lock
);
1845 if (vf
>= pci_num_vf(ionic
->pdev
) || !ionic
->vfs
) {
1848 ret
= ionic_set_vf_config(ionic
, vf
,
1849 IONIC_VF_ATTR_SPOOFCHK
, &data
);
1851 ionic
->vfs
[vf
].spoofchk
= data
;
1854 up_write(&ionic
->vf_op_lock
);
1858 static int ionic_set_vf_trust(struct net_device
*netdev
, int vf
, bool set
)
1860 struct ionic_lif
*lif
= netdev_priv(netdev
);
1861 struct ionic
*ionic
= lif
->ionic
;
1862 u8 data
= set
; /* convert to u8 for config */
1865 down_write(&ionic
->vf_op_lock
);
1867 if (vf
>= pci_num_vf(ionic
->pdev
) || !ionic
->vfs
) {
1870 ret
= ionic_set_vf_config(ionic
, vf
,
1871 IONIC_VF_ATTR_TRUST
, &data
);
1873 ionic
->vfs
[vf
].trusted
= data
;
1876 up_write(&ionic
->vf_op_lock
);
1880 static int ionic_set_vf_link_state(struct net_device
*netdev
, int vf
, int set
)
1882 struct ionic_lif
*lif
= netdev_priv(netdev
);
1883 struct ionic
*ionic
= lif
->ionic
;
1888 case IFLA_VF_LINK_STATE_ENABLE
:
1889 data
= IONIC_VF_LINK_STATUS_UP
;
1891 case IFLA_VF_LINK_STATE_DISABLE
:
1892 data
= IONIC_VF_LINK_STATUS_DOWN
;
1894 case IFLA_VF_LINK_STATE_AUTO
:
1895 data
= IONIC_VF_LINK_STATUS_AUTO
;
1901 down_write(&ionic
->vf_op_lock
);
1903 if (vf
>= pci_num_vf(ionic
->pdev
) || !ionic
->vfs
) {
1906 ret
= ionic_set_vf_config(ionic
, vf
,
1907 IONIC_VF_ATTR_LINKSTATE
, &data
);
1909 ionic
->vfs
[vf
].linkstate
= set
;
1912 up_write(&ionic
->vf_op_lock
);
1916 static const struct net_device_ops ionic_netdev_ops
= {
1917 .ndo_open
= ionic_open
,
1918 .ndo_stop
= ionic_stop
,
1919 .ndo_start_xmit
= ionic_start_xmit
,
1920 .ndo_get_stats64
= ionic_get_stats64
,
1921 .ndo_set_rx_mode
= ionic_set_rx_mode
,
1922 .ndo_set_features
= ionic_set_features
,
1923 .ndo_set_mac_address
= ionic_set_mac_address
,
1924 .ndo_validate_addr
= eth_validate_addr
,
1925 .ndo_tx_timeout
= ionic_tx_timeout
,
1926 .ndo_change_mtu
= ionic_change_mtu
,
1927 .ndo_vlan_rx_add_vid
= ionic_vlan_rx_add_vid
,
1928 .ndo_vlan_rx_kill_vid
= ionic_vlan_rx_kill_vid
,
1929 .ndo_set_vf_vlan
= ionic_set_vf_vlan
,
1930 .ndo_set_vf_trust
= ionic_set_vf_trust
,
1931 .ndo_set_vf_mac
= ionic_set_vf_mac
,
1932 .ndo_set_vf_rate
= ionic_set_vf_rate
,
1933 .ndo_set_vf_spoofchk
= ionic_set_vf_spoofchk
,
1934 .ndo_get_vf_config
= ionic_get_vf_config
,
1935 .ndo_set_vf_link_state
= ionic_set_vf_link_state
,
1936 .ndo_get_vf_stats
= ionic_get_vf_stats
,
1939 int ionic_reset_queues(struct ionic_lif
*lif
)
1944 /* Put off the next watchdog timeout */
1945 netif_trans_update(lif
->netdev
);
1947 err
= ionic_wait_for_bit(lif
, IONIC_LIF_F_QUEUE_RESET
);
1951 running
= netif_running(lif
->netdev
);
1953 err
= ionic_stop(lif
->netdev
);
1954 if (!err
&& running
)
1955 ionic_open(lif
->netdev
);
1957 clear_bit(IONIC_LIF_F_QUEUE_RESET
, lif
->state
);
1962 static struct ionic_lif
*ionic_lif_alloc(struct ionic
*ionic
, unsigned int index
)
1964 struct device
*dev
= ionic
->dev
;
1965 struct net_device
*netdev
;
1966 struct ionic_lif
*lif
;
1970 netdev
= alloc_etherdev_mqs(sizeof(*lif
),
1971 ionic
->ntxqs_per_lif
, ionic
->ntxqs_per_lif
);
1973 dev_err(dev
, "Cannot allocate netdev, aborting\n");
1974 return ERR_PTR(-ENOMEM
);
1977 SET_NETDEV_DEV(netdev
, dev
);
1979 lif
= netdev_priv(netdev
);
1980 lif
->netdev
= netdev
;
1981 ionic
->master_lif
= lif
;
1982 netdev
->netdev_ops
= &ionic_netdev_ops
;
1983 ionic_ethtool_set_ops(netdev
);
1985 netdev
->watchdog_timeo
= 2 * HZ
;
1986 netif_carrier_off(netdev
);
1988 netdev
->min_mtu
= IONIC_MIN_MTU
;
1989 netdev
->max_mtu
= IONIC_MAX_MTU
;
1991 lif
->neqs
= ionic
->neqs_per_lif
;
1992 lif
->nxqs
= ionic
->ntxqs_per_lif
;
1996 lif
->ntxq_descs
= IONIC_DEF_TXRX_DESC
;
1997 lif
->nrxq_descs
= IONIC_DEF_TXRX_DESC
;
1999 /* Convert the default coalesce value to actual hw resolution */
2000 lif
->rx_coalesce_usecs
= IONIC_ITR_COAL_USEC_DEFAULT
;
2001 lif
->rx_coalesce_hw
= ionic_coal_usec_to_hw(lif
->ionic
,
2002 lif
->rx_coalesce_usecs
);
2004 snprintf(lif
->name
, sizeof(lif
->name
), "lif%u", index
);
2006 spin_lock_init(&lif
->adminq_lock
);
2008 spin_lock_init(&lif
->deferred
.lock
);
2009 INIT_LIST_HEAD(&lif
->deferred
.list
);
2010 INIT_WORK(&lif
->deferred
.work
, ionic_lif_deferred_work
);
2012 /* allocate lif info */
2013 lif
->info_sz
= ALIGN(sizeof(*lif
->info
), PAGE_SIZE
);
2014 lif
->info
= dma_alloc_coherent(dev
, lif
->info_sz
,
2015 &lif
->info_pa
, GFP_KERNEL
);
2017 dev_err(dev
, "Failed to allocate lif info, aborting\n");
2019 goto err_out_free_netdev
;
2022 ionic_debugfs_add_lif(lif
);
2024 /* allocate queues */
2025 err
= ionic_qcqs_alloc(lif
);
2027 goto err_out_free_lif_info
;
2029 /* allocate rss indirection table */
2030 tbl_sz
= le16_to_cpu(lif
->ionic
->ident
.lif
.eth
.rss_ind_tbl_sz
);
2031 lif
->rss_ind_tbl_sz
= sizeof(*lif
->rss_ind_tbl
) * tbl_sz
;
2032 lif
->rss_ind_tbl
= dma_alloc_coherent(dev
, lif
->rss_ind_tbl_sz
,
2033 &lif
->rss_ind_tbl_pa
,
2036 if (!lif
->rss_ind_tbl
) {
2038 dev_err(dev
, "Failed to allocate rss indirection table, aborting\n");
2039 goto err_out_free_qcqs
;
2041 netdev_rss_key_fill(lif
->rss_hash_key
, IONIC_RSS_HASH_KEY_SIZE
);
2043 list_add_tail(&lif
->list
, &ionic
->lifs
);
2048 ionic_qcqs_free(lif
);
2049 err_out_free_lif_info
:
2050 dma_free_coherent(dev
, lif
->info_sz
, lif
->info
, lif
->info_pa
);
2053 err_out_free_netdev
:
2054 free_netdev(lif
->netdev
);
2057 return ERR_PTR(err
);
2060 int ionic_lifs_alloc(struct ionic
*ionic
)
2062 struct ionic_lif
*lif
;
2064 INIT_LIST_HEAD(&ionic
->lifs
);
2066 /* only build the first lif, others are for later features */
2067 set_bit(0, ionic
->lifbits
);
2068 lif
= ionic_lif_alloc(ionic
, 0);
2070 return PTR_ERR_OR_ZERO(lif
);
2073 static void ionic_lif_reset(struct ionic_lif
*lif
)
2075 struct ionic_dev
*idev
= &lif
->ionic
->idev
;
2077 mutex_lock(&lif
->ionic
->dev_cmd_lock
);
2078 ionic_dev_cmd_lif_reset(idev
, lif
->index
);
2079 ionic_dev_cmd_wait(lif
->ionic
, DEVCMD_TIMEOUT
);
2080 mutex_unlock(&lif
->ionic
->dev_cmd_lock
);
2083 static void ionic_lif_handle_fw_down(struct ionic_lif
*lif
)
2085 struct ionic
*ionic
= lif
->ionic
;
2087 if (test_and_set_bit(IONIC_LIF_F_FW_RESET
, lif
->state
))
2090 dev_info(ionic
->dev
, "FW Down: Stopping LIFs\n");
2092 netif_device_detach(lif
->netdev
);
2094 if (test_bit(IONIC_LIF_F_UP
, lif
->state
)) {
2095 dev_info(ionic
->dev
, "Surprise FW stop, stopping queues\n");
2096 ionic_stop_queues(lif
);
2099 if (netif_running(lif
->netdev
)) {
2100 ionic_txrx_deinit(lif
);
2101 ionic_txrx_free(lif
);
2103 ionic_lifs_deinit(ionic
);
2104 ionic_qcqs_free(lif
);
2106 dev_info(ionic
->dev
, "FW Down: LIFs stopped\n");
2109 static void ionic_lif_handle_fw_up(struct ionic_lif
*lif
)
2111 struct ionic
*ionic
= lif
->ionic
;
2114 if (!test_bit(IONIC_LIF_F_FW_RESET
, lif
->state
))
2117 dev_info(ionic
->dev
, "FW Up: restarting LIFs\n");
2119 err
= ionic_qcqs_alloc(lif
);
2123 err
= ionic_lifs_init(ionic
);
2127 if (lif
->registered
)
2128 ionic_lif_set_netdev_info(lif
);
2130 ionic_rx_filter_replay(lif
);
2132 if (netif_running(lif
->netdev
)) {
2133 err
= ionic_txrx_alloc(lif
);
2135 goto err_lifs_deinit
;
2137 err
= ionic_txrx_init(lif
);
2142 clear_bit(IONIC_LIF_F_FW_RESET
, lif
->state
);
2143 ionic_link_status_check_request(lif
);
2144 netif_device_attach(lif
->netdev
);
2145 dev_info(ionic
->dev
, "FW Up: LIFs restarted\n");
2150 ionic_txrx_free(lif
);
2152 ionic_lifs_deinit(ionic
);
2154 ionic_qcqs_free(lif
);
2156 dev_err(ionic
->dev
, "FW Up: LIFs restart failed - err %d\n", err
);
2159 static void ionic_lif_free(struct ionic_lif
*lif
)
2161 struct device
*dev
= lif
->ionic
->dev
;
2163 /* free rss indirection table */
2164 dma_free_coherent(dev
, lif
->rss_ind_tbl_sz
, lif
->rss_ind_tbl
,
2165 lif
->rss_ind_tbl_pa
);
2166 lif
->rss_ind_tbl
= NULL
;
2167 lif
->rss_ind_tbl_pa
= 0;
2170 ionic_qcqs_free(lif
);
2171 if (!test_bit(IONIC_LIF_F_FW_RESET
, lif
->state
))
2172 ionic_lif_reset(lif
);
2175 dma_free_coherent(dev
, lif
->info_sz
, lif
->info
, lif
->info_pa
);
2179 /* unmap doorbell page */
2180 ionic_bus_unmap_dbpage(lif
->ionic
, lif
->kern_dbpage
);
2181 lif
->kern_dbpage
= NULL
;
2182 kfree(lif
->dbid_inuse
);
2183 lif
->dbid_inuse
= NULL
;
2185 /* free netdev & lif */
2186 ionic_debugfs_del_lif(lif
);
2187 list_del(&lif
->list
);
2188 free_netdev(lif
->netdev
);
2191 void ionic_lifs_free(struct ionic
*ionic
)
2193 struct list_head
*cur
, *tmp
;
2194 struct ionic_lif
*lif
;
2196 list_for_each_safe(cur
, tmp
, &ionic
->lifs
) {
2197 lif
= list_entry(cur
, struct ionic_lif
, list
);
2199 ionic_lif_free(lif
);
2203 static void ionic_lif_deinit(struct ionic_lif
*lif
)
2205 if (!test_and_clear_bit(IONIC_LIF_F_INITED
, lif
->state
))
2208 if (!test_bit(IONIC_LIF_F_FW_RESET
, lif
->state
)) {
2209 cancel_work_sync(&lif
->deferred
.work
);
2210 cancel_work_sync(&lif
->tx_timeout_work
);
2211 ionic_rx_filters_deinit(lif
);
2214 if (lif
->netdev
->features
& NETIF_F_RXHASH
)
2215 ionic_lif_rss_deinit(lif
);
2217 napi_disable(&lif
->adminqcq
->napi
);
2218 ionic_lif_qcq_deinit(lif
, lif
->notifyqcq
);
2219 ionic_lif_qcq_deinit(lif
, lif
->adminqcq
);
2221 ionic_lif_reset(lif
);
2224 void ionic_lifs_deinit(struct ionic
*ionic
)
2226 struct list_head
*cur
, *tmp
;
2227 struct ionic_lif
*lif
;
2229 list_for_each_safe(cur
, tmp
, &ionic
->lifs
) {
2230 lif
= list_entry(cur
, struct ionic_lif
, list
);
2231 ionic_lif_deinit(lif
);
2235 static int ionic_lif_adminq_init(struct ionic_lif
*lif
)
2237 struct device
*dev
= lif
->ionic
->dev
;
2238 struct ionic_q_init_comp comp
;
2239 struct ionic_dev
*idev
;
2240 struct ionic_qcq
*qcq
;
2241 struct ionic_queue
*q
;
2244 idev
= &lif
->ionic
->idev
;
2245 qcq
= lif
->adminqcq
;
2248 mutex_lock(&lif
->ionic
->dev_cmd_lock
);
2249 ionic_dev_cmd_adminq_init(idev
, qcq
, lif
->index
, qcq
->intr
.index
);
2250 err
= ionic_dev_cmd_wait(lif
->ionic
, DEVCMD_TIMEOUT
);
2251 ionic_dev_cmd_comp(idev
, (union ionic_dev_cmd_comp
*)&comp
);
2252 mutex_unlock(&lif
->ionic
->dev_cmd_lock
);
2254 netdev_err(lif
->netdev
, "adminq init failed %d\n", err
);
2258 q
->hw_type
= comp
.hw_type
;
2259 q
->hw_index
= le32_to_cpu(comp
.hw_index
);
2260 q
->dbval
= IONIC_DBELL_QID(q
->hw_index
);
2262 dev_dbg(dev
, "adminq->hw_type %d\n", q
->hw_type
);
2263 dev_dbg(dev
, "adminq->hw_index %d\n", q
->hw_index
);
2265 netif_napi_add(lif
->netdev
, &qcq
->napi
, ionic_adminq_napi
,
2268 napi_enable(&qcq
->napi
);
2270 if (qcq
->flags
& IONIC_QCQ_F_INTR
)
2271 ionic_intr_mask(idev
->intr_ctrl
, qcq
->intr
.index
,
2272 IONIC_INTR_MASK_CLEAR
);
2274 qcq
->flags
|= IONIC_QCQ_F_INITED
;
2279 static int ionic_lif_notifyq_init(struct ionic_lif
*lif
)
2281 struct ionic_qcq
*qcq
= lif
->notifyqcq
;
2282 struct device
*dev
= lif
->ionic
->dev
;
2283 struct ionic_queue
*q
= &qcq
->q
;
2286 struct ionic_admin_ctx ctx
= {
2287 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
2289 .opcode
= IONIC_CMD_Q_INIT
,
2290 .lif_index
= cpu_to_le16(lif
->index
),
2292 .index
= cpu_to_le32(q
->index
),
2293 .flags
= cpu_to_le16(IONIC_QINIT_F_IRQ
|
2295 .intr_index
= cpu_to_le16(lif
->adminqcq
->intr
.index
),
2296 .pid
= cpu_to_le16(q
->pid
),
2297 .ring_size
= ilog2(q
->num_descs
),
2298 .ring_base
= cpu_to_le64(q
->base_pa
),
2302 dev_dbg(dev
, "notifyq_init.pid %d\n", ctx
.cmd
.q_init
.pid
);
2303 dev_dbg(dev
, "notifyq_init.index %d\n", ctx
.cmd
.q_init
.index
);
2304 dev_dbg(dev
, "notifyq_init.ring_base 0x%llx\n", ctx
.cmd
.q_init
.ring_base
);
2305 dev_dbg(dev
, "notifyq_init.ring_size %d\n", ctx
.cmd
.q_init
.ring_size
);
2307 err
= ionic_adminq_post_wait(lif
, &ctx
);
2312 q
->hw_type
= ctx
.comp
.q_init
.hw_type
;
2313 q
->hw_index
= le32_to_cpu(ctx
.comp
.q_init
.hw_index
);
2314 q
->dbval
= IONIC_DBELL_QID(q
->hw_index
);
2316 dev_dbg(dev
, "notifyq->hw_type %d\n", q
->hw_type
);
2317 dev_dbg(dev
, "notifyq->hw_index %d\n", q
->hw_index
);
2319 /* preset the callback info */
2320 q
->info
[0].cb_arg
= lif
;
2322 qcq
->flags
|= IONIC_QCQ_F_INITED
;
2327 static int ionic_station_set(struct ionic_lif
*lif
)
2329 struct net_device
*netdev
= lif
->netdev
;
2330 struct ionic_admin_ctx ctx
= {
2331 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
2332 .cmd
.lif_getattr
= {
2333 .opcode
= IONIC_CMD_LIF_GETATTR
,
2334 .index
= cpu_to_le16(lif
->index
),
2335 .attr
= IONIC_LIF_ATTR_MAC
,
2338 struct sockaddr addr
;
2341 err
= ionic_adminq_post_wait(lif
, &ctx
);
2344 netdev_dbg(lif
->netdev
, "found initial MAC addr %pM\n",
2345 ctx
.comp
.lif_getattr
.mac
);
2346 if (is_zero_ether_addr(ctx
.comp
.lif_getattr
.mac
))
2349 if (!ether_addr_equal(ctx
.comp
.lif_getattr
.mac
, netdev
->dev_addr
)) {
2350 memcpy(addr
.sa_data
, ctx
.comp
.lif_getattr
.mac
, netdev
->addr_len
);
2351 addr
.sa_family
= AF_INET
;
2352 err
= eth_prepare_mac_addr_change(netdev
, &addr
);
2354 netdev_warn(lif
->netdev
, "ignoring bad MAC addr from NIC %pM - err %d\n",
2359 if (!is_zero_ether_addr(netdev
->dev_addr
)) {
2360 netdev_dbg(lif
->netdev
, "deleting station MAC addr %pM\n",
2362 ionic_lif_addr(lif
, netdev
->dev_addr
, false);
2365 eth_commit_mac_addr_change(netdev
, &addr
);
2368 netdev_dbg(lif
->netdev
, "adding station MAC addr %pM\n",
2370 ionic_lif_addr(lif
, netdev
->dev_addr
, true);
2375 static int ionic_lif_init(struct ionic_lif
*lif
)
2377 struct ionic_dev
*idev
= &lif
->ionic
->idev
;
2378 struct device
*dev
= lif
->ionic
->dev
;
2379 struct ionic_lif_init_comp comp
;
2383 mutex_lock(&lif
->ionic
->dev_cmd_lock
);
2384 ionic_dev_cmd_lif_init(idev
, lif
->index
, lif
->info_pa
);
2385 err
= ionic_dev_cmd_wait(lif
->ionic
, DEVCMD_TIMEOUT
);
2386 ionic_dev_cmd_comp(idev
, (union ionic_dev_cmd_comp
*)&comp
);
2387 mutex_unlock(&lif
->ionic
->dev_cmd_lock
);
2391 lif
->hw_index
= le16_to_cpu(comp
.hw_index
);
2393 /* now that we have the hw_index we can figure out our doorbell page */
2394 lif
->dbid_count
= le32_to_cpu(lif
->ionic
->ident
.dev
.ndbpgs_per_lif
);
2395 if (!lif
->dbid_count
) {
2396 dev_err(dev
, "No doorbell pages, aborting\n");
2400 lif
->dbid_inuse
= bitmap_alloc(lif
->dbid_count
, GFP_KERNEL
);
2401 if (!lif
->dbid_inuse
) {
2402 dev_err(dev
, "Failed alloc doorbell id bitmap, aborting\n");
2406 /* first doorbell id reserved for kernel (dbid aka pid == zero) */
2407 set_bit(0, lif
->dbid_inuse
);
2410 dbpage_num
= ionic_db_page_num(lif
, lif
->kern_pid
);
2411 lif
->kern_dbpage
= ionic_bus_map_dbpage(lif
->ionic
, dbpage_num
);
2412 if (!lif
->kern_dbpage
) {
2413 dev_err(dev
, "Cannot map dbpage, aborting\n");
2415 goto err_out_free_dbid
;
2418 err
= ionic_lif_adminq_init(lif
);
2420 goto err_out_adminq_deinit
;
2422 if (lif
->ionic
->nnqs_per_lif
) {
2423 err
= ionic_lif_notifyq_init(lif
);
2425 goto err_out_notifyq_deinit
;
2428 err
= ionic_init_nic_features(lif
);
2430 goto err_out_notifyq_deinit
;
2432 if (!test_bit(IONIC_LIF_F_FW_RESET
, lif
->state
)) {
2433 err
= ionic_rx_filters_init(lif
);
2435 goto err_out_notifyq_deinit
;
2438 err
= ionic_station_set(lif
);
2440 goto err_out_notifyq_deinit
;
2442 lif
->rx_copybreak
= IONIC_RX_COPYBREAK_DEFAULT
;
2444 set_bit(IONIC_LIF_F_INITED
, lif
->state
);
2446 INIT_WORK(&lif
->tx_timeout_work
, ionic_tx_timeout_work
);
2450 err_out_notifyq_deinit
:
2451 ionic_lif_qcq_deinit(lif
, lif
->notifyqcq
);
2452 err_out_adminq_deinit
:
2453 ionic_lif_qcq_deinit(lif
, lif
->adminqcq
);
2454 ionic_lif_reset(lif
);
2455 ionic_bus_unmap_dbpage(lif
->ionic
, lif
->kern_dbpage
);
2456 lif
->kern_dbpage
= NULL
;
2458 kfree(lif
->dbid_inuse
);
2459 lif
->dbid_inuse
= NULL
;
2464 int ionic_lifs_init(struct ionic
*ionic
)
2466 struct list_head
*cur
, *tmp
;
2467 struct ionic_lif
*lif
;
2470 list_for_each_safe(cur
, tmp
, &ionic
->lifs
) {
2471 lif
= list_entry(cur
, struct ionic_lif
, list
);
2472 err
= ionic_lif_init(lif
);
/* Placeholder for deferred notifier work; intentionally empty for now. */
static void ionic_lif_notify_work(struct work_struct *ws)
{
}
2484 static void ionic_lif_set_netdev_info(struct ionic_lif
*lif
)
2486 struct ionic_admin_ctx ctx
= {
2487 .work
= COMPLETION_INITIALIZER_ONSTACK(ctx
.work
),
2488 .cmd
.lif_setattr
= {
2489 .opcode
= IONIC_CMD_LIF_SETATTR
,
2490 .index
= cpu_to_le16(lif
->index
),
2491 .attr
= IONIC_LIF_ATTR_NAME
,
2495 strlcpy(ctx
.cmd
.lif_setattr
.name
, lif
->netdev
->name
,
2496 sizeof(ctx
.cmd
.lif_setattr
.name
));
2498 ionic_adminq_post_wait(lif
, &ctx
);
2501 static struct ionic_lif
*ionic_netdev_lif(struct net_device
*netdev
)
2503 if (!netdev
|| netdev
->netdev_ops
->ndo_start_xmit
!= ionic_start_xmit
)
2506 return netdev_priv(netdev
);
2509 static int ionic_lif_notify(struct notifier_block
*nb
,
2510 unsigned long event
, void *info
)
2512 struct net_device
*ndev
= netdev_notifier_info_to_dev(info
);
2513 struct ionic
*ionic
= container_of(nb
, struct ionic
, nb
);
2514 struct ionic_lif
*lif
= ionic_netdev_lif(ndev
);
2516 if (!lif
|| lif
->ionic
!= ionic
)
2520 case NETDEV_CHANGENAME
:
2521 ionic_lif_set_netdev_info(lif
);
2528 int ionic_lifs_register(struct ionic
*ionic
)
2532 /* the netdev is not registered on the management device, it is
2533 * only used as a vehicle for napi operations on the adminq
2535 if (ionic
->is_mgmt_nic
)
2538 INIT_WORK(&ionic
->nb_work
, ionic_lif_notify_work
);
2540 ionic
->nb
.notifier_call
= ionic_lif_notify
;
2542 err
= register_netdevice_notifier(&ionic
->nb
);
2544 ionic
->nb
.notifier_call
= NULL
;
2546 /* only register LIF0 for now */
2547 err
= register_netdev(ionic
->master_lif
->netdev
);
2549 dev_err(ionic
->dev
, "Cannot register net device, aborting\n");
2553 ionic_link_status_check_request(ionic
->master_lif
);
2554 ionic
->master_lif
->registered
= true;
/* Undo ionic_lifs_register(): drop the netdevice notifier (and any
 * pending notify work), then unregister the master LIF's netdev if it
 * actually reached the registered state.
 */
void ionic_lifs_unregister(struct ionic *ionic)
{
	/* nb.notifier_call doubles as the "notifier is registered" flag;
	 * set in ionic_lifs_register(), cleared here
	 */
	if (ionic->nb.notifier_call) {
		unregister_netdevice_notifier(&ionic->nb);
		cancel_work_sync(&ionic->nb_work);
		ionic->nb.notifier_call = NULL;
	}

	/* There is only one lif ever registered in the
	 * current model, so don't bother searching the
	 * ionic->lif for candidates to unregister
	 */
	if (ionic->master_lif &&
	    ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(ionic->master_lif->netdev);
}
2576 int ionic_lif_identify(struct ionic
*ionic
, u8 lif_type
,
2577 union ionic_lif_identity
*lid
)
2579 struct ionic_dev
*idev
= &ionic
->idev
;
2583 sz
= min(sizeof(*lid
), sizeof(idev
->dev_cmd_regs
->data
));
2585 mutex_lock(&ionic
->dev_cmd_lock
);
2586 ionic_dev_cmd_lif_identify(idev
, lif_type
, IONIC_IDENTITY_VERSION_1
);
2587 err
= ionic_dev_cmd_wait(ionic
, DEVCMD_TIMEOUT
);
2588 memcpy_fromio(lid
, &idev
->dev_cmd_regs
->data
, sz
);
2589 mutex_unlock(&ionic
->dev_cmd_lock
);
2593 dev_dbg(ionic
->dev
, "capabilities 0x%llx\n",
2594 le64_to_cpu(lid
->capabilities
));
2596 dev_dbg(ionic
->dev
, "eth.max_ucast_filters %d\n",
2597 le32_to_cpu(lid
->eth
.max_ucast_filters
));
2598 dev_dbg(ionic
->dev
, "eth.max_mcast_filters %d\n",
2599 le32_to_cpu(lid
->eth
.max_mcast_filters
));
2600 dev_dbg(ionic
->dev
, "eth.features 0x%llx\n",
2601 le64_to_cpu(lid
->eth
.config
.features
));
2602 dev_dbg(ionic
->dev
, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
2603 le32_to_cpu(lid
->eth
.config
.queue_count
[IONIC_QTYPE_ADMINQ
]));
2604 dev_dbg(ionic
->dev
, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
2605 le32_to_cpu(lid
->eth
.config
.queue_count
[IONIC_QTYPE_NOTIFYQ
]));
2606 dev_dbg(ionic
->dev
, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
2607 le32_to_cpu(lid
->eth
.config
.queue_count
[IONIC_QTYPE_RXQ
]));
2608 dev_dbg(ionic
->dev
, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
2609 le32_to_cpu(lid
->eth
.config
.queue_count
[IONIC_QTYPE_TXQ
]));
2610 dev_dbg(ionic
->dev
, "eth.config.name %s\n", lid
->eth
.config
.name
);
2611 dev_dbg(ionic
->dev
, "eth.config.mac %pM\n", lid
->eth
.config
.mac
);
2612 dev_dbg(ionic
->dev
, "eth.config.mtu %d\n",
2613 le32_to_cpu(lid
->eth
.config
.mtu
));
2618 int ionic_lifs_size(struct ionic
*ionic
)
2620 struct ionic_identity
*ident
= &ionic
->ident
;
2621 unsigned int nintrs
, dev_nintrs
;
2622 union ionic_lif_config
*lc
;
2623 unsigned int ntxqs_per_lif
;
2624 unsigned int nrxqs_per_lif
;
2625 unsigned int neqs_per_lif
;
2626 unsigned int nnqs_per_lif
;
2627 unsigned int nxqs
, neqs
;
2628 unsigned int min_intrs
;
2631 lc
= &ident
->lif
.eth
.config
;
2632 dev_nintrs
= le32_to_cpu(ident
->dev
.nintrs
);
2633 neqs_per_lif
= le32_to_cpu(ident
->lif
.rdma
.eq_qtype
.qid_count
);
2634 nnqs_per_lif
= le32_to_cpu(lc
->queue_count
[IONIC_QTYPE_NOTIFYQ
]);
2635 ntxqs_per_lif
= le32_to_cpu(lc
->queue_count
[IONIC_QTYPE_TXQ
]);
2636 nrxqs_per_lif
= le32_to_cpu(lc
->queue_count
[IONIC_QTYPE_RXQ
]);
2638 nxqs
= min(ntxqs_per_lif
, nrxqs_per_lif
);
2639 nxqs
= min(nxqs
, num_online_cpus());
2640 neqs
= min(neqs_per_lif
, num_online_cpus());
2644 * 1 for master lif adminq/notifyq
2645 * 1 for each CPU for master lif TxRx queue pairs
2646 * whatever's left is for RDMA queues
2648 nintrs
= 1 + nxqs
+ neqs
;
2649 min_intrs
= 2; /* adminq + 1 TxRx queue pair */
2651 if (nintrs
> dev_nintrs
)
2654 err
= ionic_bus_alloc_irq_vectors(ionic
, nintrs
);
2655 if (err
< 0 && err
!= -ENOSPC
) {
2656 dev_err(ionic
->dev
, "Can't get intrs from OS: %d\n", err
);
2662 if (err
!= nintrs
) {
2663 ionic_bus_free_irq_vectors(ionic
);
2667 ionic
->nnqs_per_lif
= nnqs_per_lif
;
2668 ionic
->neqs_per_lif
= neqs
;
2669 ionic
->ntxqs_per_lif
= nxqs
;
2670 ionic
->nrxqs_per_lif
= nxqs
;
2671 ionic
->nintrs
= nintrs
;
2673 ionic_debugfs_add_sizes(ionic
);
2678 if (nnqs_per_lif
> 1) {
2690 dev_err(ionic
->dev
, "Can't get minimum %d intrs from OS\n", min_intrs
);