/**************************************************************************/
/* IBM System i and System p Virtual NIC Device Driver */
/* Copyright (C) 2014 IBM Corp. */
/* Santiago Leon (santi_leon@yahoo.com) */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
/* John Allen (jallen@linux.vnet.ibm.com) */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program. */
/* */
/* This module contains the implementation of a virtual ethernet device */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor. */
/* */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
/* are used by the driver to notify the server that a packet is */
/* ready for transmission or that a buffer has been added to receive a */
/* packet. Subsequently, sCRQs are used by the server to notify the */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer. */
/* */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
/* which skbs are DMA mapped and immediately unmapped when the transmit */
/* or receive has been completed, the VNIC driver is required to use */
/* "long term mapping". This entails that large, contiguous DMA mapped */
/* buffers are allocated on driver initialization and these buffers are */
/* then continuously reused to pass skbs to and from the VNIC server. */
/* */
/**************************************************************************/
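
/* Illustrative sketch of the long term mapping scheme described above
 * (added for clarity; the constants are examples, not values taken from
 * the VNIC architecture): a pool of 256 buffers of 4KB each allocates a
 * single 1MB DMA-coherent region up front, and buffer i always lives at
 * offset i * 4096 within that region -- see the matching offset
 * arithmetic in replenish_rx_pool() and ibmvnic_xmit() below.
 */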

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
                       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
                           struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
                            struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
                        struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
                                        struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static int ibmvnic_reset_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
static int init_crq_queue(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
        char name[ETH_GSTRING_LEN];
        int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
                             offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
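
/* Worked example (illustrative only): for the "rx_packets" entry below,
 * IBMVNIC_STAT_OFF(rx_packets) evaluates to the byte offset of
 * adapter->stats.rx_packets within struct ibmvnic_adapter, so
 * IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets)) dereferences
 * that slot as a u64 -- equivalent to reading adapter->stats.rx_packets.
 */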

static const struct ibmvnic_stat ibmvnic_stats[] = {
        {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
        {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
        {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
        {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
        {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
        {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
        {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
        {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
        {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
        {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
        {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
        {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
        {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
        {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
        {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
        {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
        {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
        {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
        {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
        {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
        {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
        {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
                          unsigned long length, unsigned long *number,
                          unsigned long *irq)
{
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        long rc;

        rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
        *number = retbuf[0];
        *irq = retbuf[1];

        return rc;
}

static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_long_term_buff *ltb, int size)
{
        struct device *dev = &adapter->vdev->dev;
        int rc;

        ltb->size = size;
        ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
                                       GFP_KERNEL);

        if (!ltb->buff) {
                dev_err(dev, "Couldn't alloc long term buffer\n");
                return -ENOMEM;
        }

        ltb->map_id = adapter->map_id;
        adapter->map_id++;

        init_completion(&adapter->fw_done);
        rc = send_request_map(adapter, ltb->addr,
                              ltb->size, ltb->map_id);
        if (rc) {
                dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
                return rc;
        }
        wait_for_completion(&adapter->fw_done);

        if (adapter->fw_done_rc) {
                dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
                        adapter->fw_done_rc);
                dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
                return -1;
        }
        return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_long_term_buff *ltb)
{
        struct device *dev = &adapter->vdev->dev;

        if (!ltb->buff)
                return;

        if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
            adapter->reset_reason != VNIC_RESET_MOBILITY)
                send_request_unmap(adapter, ltb->map_id);
        dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_long_term_buff *ltb)
{
        int rc;

        memset(ltb->buff, 0, ltb->size);

        init_completion(&adapter->fw_done);
        rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
        if (rc)
                return rc;
        wait_for_completion(&adapter->fw_done);

        if (adapter->fw_done_rc) {
                dev_info(&adapter->vdev->dev,
                         "Reset failed, attempting to free and reallocate buffer\n");
                free_long_term_buff(adapter, ltb);
                return alloc_long_term_buff(adapter, ltb, ltb->size);
        }
        return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
        int i;

        for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
             i++)
                adapter->rx_pool[i].active = 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
                              struct ibmvnic_rx_pool *pool)
{
        int count = pool->size - atomic_read(&pool->available);
        struct device *dev = &adapter->vdev->dev;
        int buffers_added = 0;
        unsigned long lpar_rc;
        union sub_crq sub_crq;
        struct sk_buff *skb;
        unsigned int offset;
        dma_addr_t dma_addr;
        unsigned char *dst;
        u64 *handle_array;
        int shift = 0;
        int index;
        int i;

        if (!pool->active)
                return;

        handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
                               be32_to_cpu(adapter->login_rsp_buf->
                               off_rxadd_subcrqs));

        for (i = 0; i < count; ++i) {
                skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
                if (!skb) {
                        dev_err(dev, "Couldn't replenish rx buff\n");
                        adapter->replenish_no_mem++;
                        break;
                }

                index = pool->free_map[pool->next_free];

                if (pool->rx_buff[index].skb)
                        dev_err(dev, "Inconsistent free_map!\n");

                /* Copy the skb to the long term mapped DMA buffer */
                offset = index * pool->buff_size;
                dst = pool->long_term_buff.buff + offset;
                memset(dst, 0, pool->buff_size);
                dma_addr = pool->long_term_buff.addr + offset;
                pool->rx_buff[index].data = dst;

                pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
                pool->rx_buff[index].dma = dma_addr;
                pool->rx_buff[index].skb = skb;
                pool->rx_buff[index].pool_index = pool->index;
                pool->rx_buff[index].size = pool->buff_size;

                memset(&sub_crq, 0, sizeof(sub_crq));
                sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
                sub_crq.rx_add.correlator =
                    cpu_to_be64((u64)&pool->rx_buff[index]);
                sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
                sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

                /* The length field of the sCRQ is defined to be 24 bits so the
                 * buffer size needs to be left shifted by a byte before it is
                 * converted to big endian to prevent the last byte from being
                 * truncated.
                 */
#ifdef __LITTLE_ENDIAN__
                shift = 8;
#endif
                sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

                lpar_rc = send_subcrq(adapter, handle_array[pool->index],
                                      &sub_crq);
                if (lpar_rc != H_SUCCESS)
                        goto failure;

                buffers_added++;
                adapter->replenish_add_buff_success++;
                pool->next_free = (pool->next_free + 1) % pool->size;
        }
        atomic_add(buffers_added, &pool->available);
        return;

failure:
        if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
                dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
        pool->free_map[pool->next_free] = index;
        pool->rx_buff[index].skb = NULL;

        dev_kfree_skb_any(skb);
        adapter->replenish_add_buff_failure++;
        atomic_add(buffers_added, &pool->available);

        if (lpar_rc == H_CLOSED || adapter->failover_pending) {
                /* Disable buffer pool replenishment and report carrier off if
                 * queue is closed or pending failover.
                 * Firmware guarantees that a signal will be sent to the
                 * driver, triggering a reset.
                 */
                deactivate_rx_pools(adapter);
                netif_carrier_off(adapter->netdev);
        }
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
        int i;

        adapter->replenish_task_cycles++;
        for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
             i++) {
                if (adapter->rx_pool[i].active)
                        replenish_rx_pool(adapter, &adapter->rx_pool[i]);
        }
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
        kfree(adapter->tx_stats_buffers);
        kfree(adapter->rx_stats_buffers);
        adapter->tx_stats_buffers = NULL;
        adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
        adapter->tx_stats_buffers =
                                kcalloc(IBMVNIC_MAX_QUEUES,
                                        sizeof(struct ibmvnic_tx_queue_stats),
                                        GFP_KERNEL);
        if (!adapter->tx_stats_buffers)
                return -ENOMEM;

        adapter->rx_stats_buffers =
                                kcalloc(IBMVNIC_MAX_QUEUES,
                                        sizeof(struct ibmvnic_rx_queue_stats),
                                        GFP_KERNEL);
        if (!adapter->rx_stats_buffers)
                return -ENOMEM;

        return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;

        if (!adapter->stats_token)
                return;

        dma_unmap_single(dev, adapter->stats_token,
                         sizeof(struct ibmvnic_statistics),
                         DMA_FROM_DEVICE);
        adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;
        dma_addr_t stok;

        stok = dma_map_single(dev, &adapter->stats,
                              sizeof(struct ibmvnic_statistics),
                              DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, stok)) {
                dev_err(dev, "Couldn't map stats buffer\n");
                return -1;
        }

        adapter->stats_token = stok;
        netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
        return 0;
}

static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_rx_pool *rx_pool;
        u64 *size_array;
        int rx_scrqs;
        int i, j, rc;

        size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
                be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

        rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
        for (i = 0; i < rx_scrqs; i++) {
                rx_pool = &adapter->rx_pool[i];

                netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

                if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
                        free_long_term_buff(adapter, &rx_pool->long_term_buff);
                        rx_pool->buff_size = be64_to_cpu(size_array[i]);
                        rc = alloc_long_term_buff(adapter,
                                                  &rx_pool->long_term_buff,
                                                  rx_pool->size *
                                                  rx_pool->buff_size);
                } else {
                        rc = reset_long_term_buff(adapter,
                                                  &rx_pool->long_term_buff);
                }

                if (rc)
                        return rc;

                for (j = 0; j < rx_pool->size; j++)
                        rx_pool->free_map[j] = j;

                memset(rx_pool->rx_buff, 0,
                       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

                atomic_set(&rx_pool->available, 0);
                rx_pool->next_alloc = 0;
                rx_pool->next_free = 0;
        }

        return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_rx_pool *rx_pool;
        int i, j;

        if (!adapter->rx_pool)
                return;

        for (i = 0; i < adapter->num_active_rx_pools; i++) {
                rx_pool = &adapter->rx_pool[i];

                netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

                kfree(rx_pool->free_map);
                free_long_term_buff(adapter, &rx_pool->long_term_buff);

                if (!rx_pool->rx_buff)
                        continue;

                for (j = 0; j < rx_pool->size; j++) {
                        if (rx_pool->rx_buff[j].skb) {
                                dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
                                rx_pool->rx_buff[j].skb = NULL;
                        }
                }

                kfree(rx_pool->rx_buff);
        }

        kfree(adapter->rx_pool);
        adapter->rx_pool = NULL;
        adapter->num_active_rx_pools = 0;
}

static int init_rx_pools(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_rx_pool *rx_pool;
        int rxadd_subcrqs;
        u64 *size_array;
        int i, j;

        rxadd_subcrqs =
                be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
        size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
                be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

        adapter->rx_pool = kcalloc(rxadd_subcrqs,
                                   sizeof(struct ibmvnic_rx_pool),
                                   GFP_KERNEL);
        if (!adapter->rx_pool) {
                dev_err(dev, "Failed to allocate rx pools\n");
                return -1;
        }

        adapter->num_active_rx_pools = rxadd_subcrqs;

        for (i = 0; i < rxadd_subcrqs; i++) {
                rx_pool = &adapter->rx_pool[i];

                netdev_dbg(adapter->netdev,
                           "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
                           i, adapter->req_rx_add_entries_per_subcrq,
                           be64_to_cpu(size_array[i]));

                rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
                rx_pool->index = i;
                rx_pool->buff_size = be64_to_cpu(size_array[i]);
                rx_pool->active = 1;

                rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
                                            GFP_KERNEL);
                if (!rx_pool->free_map) {
                        release_rx_pools(adapter);
                        return -1;
                }

                rx_pool->rx_buff = kcalloc(rx_pool->size,
                                           sizeof(struct ibmvnic_rx_buff),
                                           GFP_KERNEL);
                if (!rx_pool->rx_buff) {
                        dev_err(dev, "Couldn't alloc rx buffers\n");
                        release_rx_pools(adapter);
                        return -1;
                }

                if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
                                         rx_pool->size * rx_pool->buff_size)) {
                        release_rx_pools(adapter);
                        return -1;
                }

                for (j = 0; j < rx_pool->size; ++j)
                        rx_pool->free_map[j] = j;

                atomic_set(&rx_pool->available, 0);
                rx_pool->next_alloc = 0;
                rx_pool->next_free = 0;
        }

        return 0;
}

static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
                             struct ibmvnic_tx_pool *tx_pool)
{
        int rc, i;

        rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
        if (rc)
                return rc;

        memset(tx_pool->tx_buff, 0,
               tx_pool->num_buffers *
               sizeof(struct ibmvnic_tx_buff));

        for (i = 0; i < tx_pool->num_buffers; i++)
                tx_pool->free_map[i] = i;

        tx_pool->consumer_index = 0;
        tx_pool->producer_index = 0;

        return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
        int tx_scrqs;
        int i, rc;

        tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
        for (i = 0; i < tx_scrqs; i++) {
                rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
                if (rc)
                        return rc;
                rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
                if (rc)
                        return rc;
        }

        return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
        if (!adapter->vpd)
                return;

        kfree(adapter->vpd->buff);
        kfree(adapter->vpd);

        adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_tx_pool *tx_pool)
{
        kfree(tx_pool->tx_buff);
        kfree(tx_pool->free_map);
        free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
        int i;

        if (!adapter->tx_pool)
                return;

        for (i = 0; i < adapter->num_active_tx_pools; i++) {
                release_one_tx_pool(adapter, &adapter->tx_pool[i]);
                release_one_tx_pool(adapter, &adapter->tso_pool[i]);
        }

        kfree(adapter->tx_pool);
        adapter->tx_pool = NULL;
        kfree(adapter->tso_pool);
        adapter->tso_pool = NULL;
        adapter->num_active_tx_pools = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
                            struct ibmvnic_tx_pool *tx_pool,
                            int num_entries, int buf_size)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int i;

        tx_pool->tx_buff = kcalloc(num_entries,
                                   sizeof(struct ibmvnic_tx_buff),
                                   GFP_KERNEL);
        if (!tx_pool->tx_buff)
                return -1;

        if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
                                 num_entries * buf_size))
                return -1;

        tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
        if (!tx_pool->free_map)
                return -1;

        for (i = 0; i < num_entries; i++)
                tx_pool->free_map[i] = i;

        tx_pool->consumer_index = 0;
        tx_pool->producer_index = 0;
        tx_pool->num_buffers = num_entries;
        tx_pool->buf_size = buf_size;

        return 0;
}

static int init_tx_pools(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int tx_subcrqs;
        int i, rc;

        tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
        adapter->tx_pool = kcalloc(tx_subcrqs,
                                   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
        if (!adapter->tx_pool)
                return -1;

        adapter->tso_pool = kcalloc(tx_subcrqs,
                                    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
        if (!adapter->tso_pool)
                return -1;

        adapter->num_active_tx_pools = tx_subcrqs;

        for (i = 0; i < tx_subcrqs; i++) {
                rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
                                      adapter->req_tx_entries_per_subcrq,
                                      adapter->req_mtu + VLAN_HLEN);
                if (rc) {
                        release_tx_pools(adapter);
                        return rc;
                }

                rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
                                      IBMVNIC_TSO_BUFS,
                                      IBMVNIC_TSO_BUF_SZ);
                if (rc) {
                        release_tx_pools(adapter);
                        return rc;
                }
        }

        return 0;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
        int i;

        if (adapter->napi_enabled)
                return;

        for (i = 0; i < adapter->req_rx_queues; i++)
                napi_enable(&adapter->napi[i]);

        adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
        int i;

        if (!adapter->napi_enabled)
                return;

        for (i = 0; i < adapter->req_rx_queues; i++) {
                netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
                napi_disable(&adapter->napi[i]);
        }

        adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
        int i;

        adapter->napi = kcalloc(adapter->req_rx_queues,
                                sizeof(struct napi_struct), GFP_KERNEL);
        if (!adapter->napi)
                return -ENOMEM;

        for (i = 0; i < adapter->req_rx_queues; i++) {
                netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
                netif_napi_add(adapter->netdev, &adapter->napi[i],
                               ibmvnic_poll, NAPI_POLL_WEIGHT);
        }

        adapter->num_active_rx_napi = adapter->req_rx_queues;
        return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
        int i;

        if (!adapter->napi)
                return;

        for (i = 0; i < adapter->num_active_rx_napi; i++) {
                netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
                netif_napi_del(&adapter->napi[i]);
        }

        kfree(adapter->napi);
        adapter->napi = NULL;
        adapter->num_active_rx_napi = 0;
        adapter->napi_enabled = false;
}

static int ibmvnic_login(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        unsigned long timeout = msecs_to_jiffies(30000);
        int retry_count = 0;
        int rc;

        do {
                if (retry_count > IBMVNIC_MAX_QUEUES) {
                        netdev_warn(netdev, "Login attempts exceeded\n");
                        return -1;
                }

                adapter->init_done_rc = 0;
                reinit_completion(&adapter->init_done);
                rc = send_login(adapter);
                if (rc) {
                        netdev_warn(netdev, "Unable to login\n");
                        return rc;
                }

                if (!wait_for_completion_timeout(&adapter->init_done,
                                                 timeout)) {
                        netdev_warn(netdev, "Login timed out\n");
                        return -1;
                }

                if (adapter->init_done_rc == PARTIALSUCCESS) {
                        retry_count++;
                        release_sub_crqs(adapter, 1);

                        netdev_dbg(netdev,
                                   "Received partial success, retrying...\n");
                        adapter->init_done_rc = 0;
                        reinit_completion(&adapter->init_done);
                        send_cap_queries(adapter);
                        if (!wait_for_completion_timeout(&adapter->init_done,
                                                         timeout)) {
                                netdev_warn(netdev,
                                            "Capabilities query timed out\n");
                                return -1;
                        }

                        rc = init_sub_crqs(adapter);
                        if (rc) {
                                netdev_warn(netdev,
                                            "SCRQ initialization failed\n");
                                return -1;
                        }

                        rc = init_sub_crq_irqs(adapter);
                        if (rc) {
                                netdev_warn(netdev,
                                            "SCRQ irq initialization failed\n");
                                return -1;
                        }
                } else if (adapter->init_done_rc) {
                        netdev_warn(netdev, "Adapter login failed\n");
                        return -1;
                }
        } while (adapter->init_done_rc == PARTIALSUCCESS);

        /* handle pending MAC address changes after successful login */
        if (adapter->mac_change_pending) {
                __ibmvnic_set_mac(netdev, &adapter->desired.mac);
                adapter->mac_change_pending = false;
        }

        return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
        kfree(adapter->login_buf);
        adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
        kfree(adapter->login_rsp_buf);
        adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
        release_vpd_data(adapter);

        release_tx_pools(adapter);
        release_rx_pools(adapter);

        release_napi(adapter);
        release_login_rsp_buffer(adapter);
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
        struct net_device *netdev = adapter->netdev;
        unsigned long timeout = msecs_to_jiffies(30000);
        union ibmvnic_crq crq;
        bool resend;
        int rc;

        netdev_dbg(netdev, "setting link state %d\n", link_state);

        memset(&crq, 0, sizeof(crq));
        crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
        crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
        crq.logical_link_state.link_state = link_state;

        do {
                resend = false;

                reinit_completion(&adapter->init_done);
                rc = ibmvnic_send_crq(adapter, &crq);
                if (rc) {
                        netdev_err(netdev, "Failed to set link state\n");
                        return rc;
                }

                if (!wait_for_completion_timeout(&adapter->init_done,
                                                 timeout)) {
                        netdev_err(netdev, "timeout setting link state\n");
                        return -1;
                }

                if (adapter->init_done_rc == 1) {
                        /* Partial success, delay and re-send */
                        mdelay(1000);
                        resend = true;
                } else if (adapter->init_done_rc) {
                        netdev_warn(netdev, "Unable to set link state, rc=%d\n",
                                    adapter->init_done_rc);
                        return adapter->init_done_rc;
                }
        } while (resend);

        return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int rc;

        netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
                   adapter->req_tx_queues, adapter->req_rx_queues);

        rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
        if (rc) {
                netdev_err(netdev, "failed to set the number of tx queues\n");
                return rc;
        }

        rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
        if (rc)
                netdev_err(netdev, "failed to set the number of rx queues\n");

        return rc;
}

static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;
        union ibmvnic_crq crq;
        int len = 0;
        int rc;

        if (adapter->vpd->buff)
                len = adapter->vpd->len;

        init_completion(&adapter->fw_done);
        crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
        crq.get_vpd_size.cmd = GET_VPD_SIZE;
        rc = ibmvnic_send_crq(adapter, &crq);
        if (rc)
                return rc;
        wait_for_completion(&adapter->fw_done);

        if (!adapter->vpd->len)
                return -ENODATA;

        if (!adapter->vpd->buff)
                adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
        else if (adapter->vpd->len != len)
                adapter->vpd->buff =
                        krealloc(adapter->vpd->buff,
                                 adapter->vpd->len, GFP_KERNEL);

        if (!adapter->vpd->buff) {
                dev_err(dev, "Could not allocate VPD buffer\n");
                return -ENOMEM;
        }

        adapter->vpd->dma_addr =
                dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
                               DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
                dev_err(dev, "Could not map VPD buffer\n");
                kfree(adapter->vpd->buff);
                adapter->vpd->buff = NULL;
                return -ENOMEM;
        }

        reinit_completion(&adapter->fw_done);
        crq.get_vpd.first = IBMVNIC_CRQ_CMD;
        crq.get_vpd.cmd = GET_VPD;
        crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
        crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
        rc = ibmvnic_send_crq(adapter, &crq);
        if (rc) {
                kfree(adapter->vpd->buff);
                adapter->vpd->buff = NULL;
                return rc;
        }
        wait_for_completion(&adapter->fw_done);

        return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int rc;

        rc = set_real_num_queues(netdev);
        if (rc)
                return rc;

        adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
        if (!adapter->vpd)
                return -ENOMEM;

        /* Vital Product Data (VPD) */
        rc = ibmvnic_get_vpd(adapter);
        if (rc) {
                netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
                return rc;
        }

        adapter->map_id = 1;

        rc = init_napi(adapter);
        if (rc)
                return rc;

        send_map_query(adapter);

        rc = init_rx_pools(netdev);
        if (rc)
                return rc;

        rc = init_tx_pools(netdev);
        return rc;
}

static int __ibmvnic_open(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        enum vnic_state prev_state = adapter->state;
        int i, rc;

        adapter->state = VNIC_OPENING;
        replenish_pools(adapter);
        ibmvnic_napi_enable(adapter);

        /* We're ready to receive frames, enable the sub-crq interrupts and
         * set the logical link state to up
         */
        for (i = 0; i < adapter->req_rx_queues; i++) {
                netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
                if (prev_state == VNIC_CLOSED)
                        enable_irq(adapter->rx_scrq[i]->irq);
                enable_scrq_irq(adapter, adapter->rx_scrq[i]);
        }

        for (i = 0; i < adapter->req_tx_queues; i++) {
                netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
                if (prev_state == VNIC_CLOSED)
                        enable_irq(adapter->tx_scrq[i]->irq);
                enable_scrq_irq(adapter, adapter->tx_scrq[i]);
        }

        rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
        if (rc) {
                for (i = 0; i < adapter->req_rx_queues; i++)
                        napi_disable(&adapter->napi[i]);
                release_resources(adapter);
                return rc;
        }

        netif_tx_start_all_queues(netdev);

        if (prev_state == VNIC_CLOSED) {
                for (i = 0; i < adapter->req_rx_queues; i++)
                        napi_schedule(&adapter->napi[i]);
        }

        adapter->state = VNIC_OPEN;
        return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int rc;

        /* If device failover is pending, just set device state and return.
         * Device operation will be handled by reset routine.
         */
        if (adapter->failover_pending) {
                adapter->state = VNIC_OPEN;
                return 0;
        }

        if (adapter->state != VNIC_CLOSED) {
                rc = ibmvnic_login(netdev);
                if (rc)
                        return rc;

                rc = init_resources(adapter);
                if (rc) {
                        netdev_err(netdev, "failed to initialize resources\n");
                        release_resources(adapter);
                        return rc;
                }
        }

        rc = __ibmvnic_open(netdev);
        netif_carrier_on(netdev);

        return rc;
}

static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_rx_pool *rx_pool;
        struct ibmvnic_rx_buff *rx_buff;
        u64 rx_entries;
        int rx_scrqs;
        int i, j;

        if (!adapter->rx_pool)
                return;

        rx_scrqs = adapter->num_active_rx_pools;
        rx_entries = adapter->req_rx_add_entries_per_subcrq;

        /* Free any remaining skbs in the rx buffer pools */
        for (i = 0; i < rx_scrqs; i++) {
                rx_pool = &adapter->rx_pool[i];
                if (!rx_pool || !rx_pool->rx_buff)
                        continue;

                netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
                for (j = 0; j < rx_entries; j++) {
                        rx_buff = &rx_pool->rx_buff[j];
                        if (rx_buff && rx_buff->skb) {
                                dev_kfree_skb_any(rx_buff->skb);
                                rx_buff->skb = NULL;
                        }
                }
        }
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
                              struct ibmvnic_tx_pool *tx_pool)
{
        struct ibmvnic_tx_buff *tx_buff;
        u64 tx_entries;
        int i;

        if (!tx_pool || !tx_pool->tx_buff)
                return;

        tx_entries = tx_pool->num_buffers;

        for (i = 0; i < tx_entries; i++) {
                tx_buff = &tx_pool->tx_buff[i];
                if (tx_buff && tx_buff->skb) {
                        dev_kfree_skb_any(tx_buff->skb);
                        tx_buff->skb = NULL;
                }
        }
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
        int tx_scrqs;
        int i;

        if (!adapter->tx_pool || !adapter->tso_pool)
                return;

        tx_scrqs = adapter->num_active_tx_pools;

        /* Free any remaining skbs in the tx buffer pools */
        for (i = 0; i < tx_scrqs; i++) {
                netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
                clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
                clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
        }
}

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        if (adapter->tx_scrq) {
                for (i = 0; i < adapter->req_tx_queues; i++)
                        if (adapter->tx_scrq[i]->irq) {
                                netdev_dbg(netdev,
                                           "Disabling tx_scrq[%d] irq\n", i);
                                disable_scrq_irq(adapter, adapter->tx_scrq[i]);
                                disable_irq(adapter->tx_scrq[i]->irq);
                        }
        }

        if (adapter->rx_scrq) {
                for (i = 0; i < adapter->req_rx_queues; i++) {
                        if (adapter->rx_scrq[i]->irq) {
                                netdev_dbg(netdev,
                                           "Disabling rx_scrq[%d] irq\n", i);
                                disable_scrq_irq(adapter, adapter->rx_scrq[i]);
                                disable_irq(adapter->rx_scrq[i]->irq);
                        }
                }
        }
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);

        /* ensure that transmissions are stopped if called by do_reset */
        if (adapter->resetting)
                netif_tx_disable(netdev);
        else
                netif_tx_stop_all_queues(netdev);

        ibmvnic_napi_disable(adapter);
        ibmvnic_disable_irqs(adapter);

        clean_rx_pools(adapter);
        clean_tx_pools(adapter);
}

static int __ibmvnic_close(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int rc = 0;

        adapter->state = VNIC_CLOSING;
        rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
        if (rc)
                return rc;
        adapter->state = VNIC_CLOSED;
        return 0;
}

static int ibmvnic_close(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int rc;

        /* If device failover is pending, just set device state and return.
         * Device operation will be handled by reset routine.
         */
        if (adapter->failover_pending) {
                adapter->state = VNIC_CLOSED;
                return 0;
        }

        rc = __ibmvnic_close(netdev);
        ibmvnic_cleanup(netdev);

        return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @hdr_data - buffer the headers are written into
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers.  Saves individual header
 * lengths and returns the total buffer length, both of which are used
 * to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
                          int *hdr_len, u8 *hdr_data)
{
        int len = 0;
        u8 *hdr;

        if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
                hdr_len[0] = sizeof(struct vlan_ethhdr);
        else
                hdr_len[0] = sizeof(struct ethhdr);

        if (skb->protocol == htons(ETH_P_IP)) {
                hdr_len[1] = ip_hdr(skb)->ihl * 4;
                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
                        hdr_len[2] = tcp_hdrlen(skb);
                else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
                        hdr_len[2] = sizeof(struct udphdr);
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                hdr_len[1] = sizeof(struct ipv6hdr);
                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
                        hdr_len[2] = tcp_hdrlen(skb);
                else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
                        hdr_len[2] = sizeof(struct udphdr);
        } else if (skb->protocol == htons(ETH_P_ARP)) {
                hdr_len[1] = arp_hdr_len(skb->dev);
                hdr_len[2] = 0;
        }

        memset(hdr_data, 0, 120);
        if ((hdr_field >> 6) & 1) {
                hdr = skb_mac_header(skb);
                memcpy(hdr_data, hdr, hdr_len[0]);
                len += hdr_len[0];
        }

        if ((hdr_field >> 5) & 1) {
                hdr = skb_network_header(skb);
                memcpy(hdr_data + len, hdr, hdr_len[1]);
                len += hdr_len[1];
        }

        if ((hdr_field >> 4) & 1) {
                hdr = skb_transport_header(skb);
                memcpy(hdr_data + len, hdr, hdr_len[2]);
                len += hdr_len[2];
        }
        return len;
}

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @hdr_data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
                            union sub_crq *scrq_arr)
{
        union sub_crq hdr_desc;
        int tmp_len = len;
        int num_descs = 0;
        u8 *data, *cur;
        int tmp;

        while (tmp_len > 0) {
                cur = hdr_data + len - tmp_len;

                memset(&hdr_desc, 0, sizeof(hdr_desc));
                if (cur != hdr_data) {
                        data = hdr_desc.hdr_ext.data;
                        tmp = tmp_len > 29 ? 29 : tmp_len;
                        hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
                        hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
                        hdr_desc.hdr_ext.len = tmp;
                } else {
                        data = hdr_desc.hdr.data;
                        tmp = tmp_len > 24 ? 24 : tmp_len;
                        hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
                        hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
                        hdr_desc.hdr.len = tmp;
                        hdr_desc.hdr.l2_len = (u8)hdr_len[0];
                        hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
                        hdr_desc.hdr.l4_len = (u8)hdr_len[2];
                        hdr_desc.hdr.flag = hdr_field << 1;
                }
                memcpy(data, cur, tmp);
                tmp_len -= tmp;
                *scrq_arr = hdr_desc;
                scrq_arr++;
                num_descs++;
        }

        return num_descs;
}

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff - tx buffer holding the socket buffer and descriptor array
 * @num_entries - number of descriptors to be sent
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
                                int *num_entries, u8 hdr_field)
{
        int hdr_len[3] = {0, 0, 0};
        int tot_len;
        u8 *hdr_data = txbuff->hdr_data;

        tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
                                 txbuff->hdr_data);
        *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
                                         txbuff->indir_arr + 1);
}

static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
                                    struct net_device *netdev)
{
        /* For some backing devices, mishandling of small packets
         * can result in a loss of connection or TX stall. Device
         * architects recommend that no packet should be smaller
         * than the minimum MTU value provided to the driver, so
         * pad any packets to that length
         */
        if (skb->len < netdev->min_mtu)
                return skb_put_padto(skb, netdev->min_mtu);

        return 0;
}

static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int queue_num = skb_get_queue_mapping(skb);
        u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_tx_buff *tx_buff = NULL;
        struct ibmvnic_sub_crq_queue *tx_scrq;
        struct ibmvnic_tx_pool *tx_pool;
        unsigned int tx_send_failed = 0;
        unsigned int tx_map_failed = 0;
        unsigned int tx_dropped = 0;
        unsigned int tx_packets = 0;
        unsigned int tx_bytes = 0;
        dma_addr_t data_dma_addr;
        struct netdev_queue *txq;
        unsigned long lpar_rc;
        union sub_crq tx_crq;
        unsigned int offset;
        int num_entries = 1;
        unsigned char *dst;
        u64 *handle_array;
        int index = 0;
        u8 proto = 0;
        netdev_tx_t ret = NETDEV_TX_OK;

        if (adapter->resetting) {
                if (!netif_subqueue_stopped(netdev, skb))
                        netif_stop_subqueue(netdev, queue_num);
                dev_kfree_skb_any(skb);

                tx_send_failed++;
                tx_dropped++;
                ret = NETDEV_TX_OK;
                goto out;
        }

        if (ibmvnic_xmit_workarounds(skb, netdev)) {
                tx_dropped++;
                tx_send_failed++;
                ret = NETDEV_TX_OK;
                goto out;
        }
        if (skb_is_gso(skb))
                tx_pool = &adapter->tso_pool[queue_num];
        else
                tx_pool = &adapter->tx_pool[queue_num];

        tx_scrq = adapter->tx_scrq[queue_num];
        txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
        handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
                be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));

        index = tx_pool->free_map[tx_pool->consumer_index];

        if (index == IBMVNIC_INVALID_MAP) {
                dev_kfree_skb_any(skb);
                tx_send_failed++;
                tx_dropped++;
                ret = NETDEV_TX_OK;
                goto out;
        }

        tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

        offset = index * tx_pool->buf_size;
        dst = tx_pool->long_term_buff.buff + offset;
        memset(dst, 0, tx_pool->buf_size);
        data_dma_addr = tx_pool->long_term_buff.addr + offset;

        if (skb_shinfo(skb)->nr_frags) {
                int cur, i;

                /* Copy the head */
                skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
                cur = skb_headlen(skb);

                /* Copy the frags */
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        memcpy(dst + cur,
                               page_address(skb_frag_page(frag)) +
                               frag->page_offset, skb_frag_size(frag));
                        cur += skb_frag_size(frag);
                }
        } else {
                skb_copy_from_linear_data(skb, dst, skb->len);
        }

        tx_pool->consumer_index =
            (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

        tx_buff = &tx_pool->tx_buff[index];
        tx_buff->skb = skb;
        tx_buff->data_dma[0] = data_dma_addr;
        tx_buff->data_len[0] = skb->len;
        tx_buff->index = index;
        tx_buff->pool_index = queue_num;
        tx_buff->last_frag = true;

        memset(&tx_crq, 0, sizeof(tx_crq));
        tx_crq.v1.first = IBMVNIC_CRQ_CMD;
        tx_crq.v1.type = IBMVNIC_TX_DESC;
        tx_crq.v1.n_crq_elem = 1;
        tx_crq.v1.n_sge = 1;
        tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

        if (skb_is_gso(skb))
                tx_crq.v1.correlator =
                        cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
        else
                tx_crq.v1.correlator = cpu_to_be32(index);
        tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
        tx_crq.v1.sge_len = cpu_to_be32(skb->len);
        tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

        if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
                tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
                tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
        }

        if (skb->protocol == htons(ETH_P_IP)) {
                tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
                proto = ip_hdr(skb)->protocol;
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
                proto = ipv6_hdr(skb)->nexthdr;
        }

        if (proto == IPPROTO_TCP)
                tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
        else if (proto == IPPROTO_UDP)
                tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
                hdrs += 2;
        }
        if (skb_is_gso(skb)) {
                tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
                tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
                hdrs += 2;
        }
        /* determine if l2/3/4 headers are sent to firmware */
        if ((*hdrs >> 7) & 1) {
                build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
                tx_crq.v1.n_crq_elem = num_entries;
                tx_buff->num_entries = num_entries;
                tx_buff->indir_arr[0] = tx_crq;
                tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
                                                    sizeof(tx_buff->indir_arr),
                                                    DMA_TO_DEVICE);
                if (dma_mapping_error(dev, tx_buff->indir_dma)) {
                        dev_kfree_skb_any(skb);
                        tx_buff->skb = NULL;
                        if (!firmware_has_feature(FW_FEATURE_CMO))
                                dev_err(dev, "tx: unable to map descriptor array\n");
                        tx_map_failed++;
                        tx_dropped++;
                        ret = NETDEV_TX_OK;
                        goto tx_err_out;
                }
                lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
                                               (u64)tx_buff->indir_dma,
                                               (u64)num_entries);
        } else {
                tx_buff->num_entries = num_entries;
                lpar_rc = send_subcrq(adapter, handle_array[queue_num],
                                      &tx_crq);
        }
        if (lpar_rc != H_SUCCESS) {
                if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
                        dev_err_ratelimited(dev, "tx: send failed\n");
                dev_kfree_skb_any(skb);
                tx_buff->skb = NULL;

                if (lpar_rc == H_CLOSED || adapter->failover_pending) {
                        /* Disable TX and report carrier off if queue is closed
                         * or pending failover.
                         * Firmware guarantees that a signal will be sent to the
                         * driver, triggering a reset or some other action.
                         */
                        netif_tx_stop_all_queues(netdev);
                        netif_carrier_off(netdev);
                }

                tx_send_failed++;
                tx_dropped++;
                ret = NETDEV_TX_OK;
                goto tx_err_out;
        }

        if (atomic_add_return(num_entries, &tx_scrq->used)
                                          >= adapter->req_tx_entries_per_subcrq) {
                netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
                netif_stop_subqueue(netdev, queue_num);
        }

        tx_packets++;
        tx_bytes += skb->len;
        txq->trans_start = jiffies;
        ret = NETDEV_TX_OK;
        goto out;

tx_err_out:
        /* roll back consumer index and map array */
        if (tx_pool->consumer_index == 0)
                tx_pool->consumer_index =
                        tx_pool->num_buffers - 1;
        else
                tx_pool->consumer_index--;
        tx_pool->free_map[tx_pool->consumer_index] = index;
out:
        netdev->stats.tx_dropped += tx_dropped;
        netdev->stats.tx_bytes += tx_bytes;
        netdev->stats.tx_packets += tx_packets;
        adapter->tx_send_failed += tx_send_failed;
        adapter->tx_map_failed += tx_map_failed;
        adapter->tx_stats_buffers[queue_num].packets += tx_packets;
        adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
        adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

        return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        struct netdev_hw_addr *ha;
        union ibmvnic_crq crq;

        memset(&crq, 0, sizeof(crq));
        crq.request_capability.first = IBMVNIC_CRQ_CMD;
        crq.request_capability.cmd = REQUEST_CAPABILITY;

        if (netdev->flags & IFF_PROMISC) {
                if (!adapter->promisc_supported)
                        return;
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
                        /* Accept all multicast */
                        memset(&crq, 0, sizeof(crq));
                        crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
                        crq.multicast_ctrl.cmd = MULTICAST_CTRL;
                        crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
                        ibmvnic_send_crq(adapter, &crq);
                } else if (netdev_mc_empty(netdev)) {
                        /* Reject all multicast */
                        memset(&crq, 0, sizeof(crq));
                        crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
                        crq.multicast_ctrl.cmd = MULTICAST_CTRL;
                        crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
                        ibmvnic_send_crq(adapter, &crq);
                } else {
                        /* Accept one or more multicast(s) */
                        netdev_for_each_mc_addr(ha, netdev) {
                                memset(&crq, 0, sizeof(crq));
                                crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
                                crq.multicast_ctrl.cmd = MULTICAST_CTRL;
                                crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
                                ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
                                                ha->addr);
                                ibmvnic_send_crq(adapter, &crq);
                        }
                }
        }
}

static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        union ibmvnic_crq crq;
        int rc;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        memset(&crq, 0, sizeof(crq));
        crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
        crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
        ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);

        init_completion(&adapter->fw_done);
        rc = ibmvnic_send_crq(adapter, &crq);
        if (rc)
                return rc;
        wait_for_completion(&adapter->fw_done);
        /* netdev->dev_addr is changed in handle_change_mac_rsp function */
        return adapter->fw_done_rc ? -EIO : 0;
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int rc;

        if (adapter->state == VNIC_PROBED) {
                memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
                adapter->mac_change_pending = true;
                return 0;
        }

        rc = __ibmvnic_set_mac(netdev, addr);

        return rc;
}

/*
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
                    struct ibmvnic_rwi *rwi, u32 reset_state)
{
        u64 old_num_rx_queues, old_num_tx_queues;
        u64 old_num_rx_slots, old_num_tx_slots;
        struct net_device *netdev = adapter->netdev;
        int i, rc;

        netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
                   rwi->reset_reason);

        netif_carrier_off(netdev);
        adapter->reset_reason = rwi->reset_reason;

        old_num_rx_queues = adapter->req_rx_queues;
        old_num_tx_queues = adapter->req_tx_queues;
        old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
        old_num_tx_slots = adapter->req_tx_entries_per_subcrq;

        ibmvnic_cleanup(netdev);

        if (adapter->reset_reason != VNIC_RESET_MOBILITY &&
            adapter->reset_reason != VNIC_RESET_FAILOVER) {
                rc = __ibmvnic_close(netdev);
                if (rc)
                        return rc;
        }

        if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
            adapter->wait_for_reset) {
                release_resources(adapter);
                release_sub_crqs(adapter, 1);
                release_crq_queue(adapter);
        }

        if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
                /* remove the closed state so when we call open it appears
                 * we are coming from the probed state.
                 */
                adapter->state = VNIC_PROBED;

                if (adapter->wait_for_reset) {
                        rc = init_crq_queue(adapter);
                } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
                        rc = ibmvnic_reenable_crq_queue(adapter);
                        release_sub_crqs(adapter, 1);
                } else {
                        rc = ibmvnic_reset_crq(adapter);
                        if (!rc)
                                rc = vio_enable_interrupts(adapter->vdev);
                }

                if (rc) {
                        netdev_err(adapter->netdev,
                                   "Couldn't initialize crq. rc=%d\n", rc);
                        return rc;
                }

                rc = ibmvnic_reset_init(adapter);
                if (rc)
                        return IBMVNIC_INIT_FAILED;

                /* If the adapter was in PROBE state prior to the reset,
                 * exit here.
                 */
                if (reset_state == VNIC_PROBED)
                        return 0;

                rc = ibmvnic_login(netdev);
                if (rc) {
                        adapter->state = reset_state;
                        return rc;
                }

                if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
                    adapter->wait_for_reset) {
                        rc = init_resources(adapter);
                        if (rc)
                                return rc;
                } else if (adapter->req_rx_queues != old_num_rx_queues ||
                           adapter->req_tx_queues != old_num_tx_queues ||
                           adapter->req_rx_add_entries_per_subcrq !=
                                                        old_num_rx_slots ||
                           adapter->req_tx_entries_per_subcrq !=
                                                        old_num_tx_slots) {
                        release_rx_pools(adapter);
                        release_tx_pools(adapter);
                        release_napi(adapter);
                        release_vpd_data(adapter);

                        rc = init_resources(adapter);
                        if (rc)
                                return rc;

                } else {
                        rc = reset_tx_pools(adapter);
                        if (rc)
                                return rc;

                        rc = reset_rx_pools(adapter);
                        if (rc)
                                return rc;
                }
                ibmvnic_disable_irqs(adapter);
        }
        adapter->state = VNIC_CLOSED;

        if (reset_state == VNIC_CLOSED)
                return 0;

        rc = __ibmvnic_open(netdev);
        if (rc) {
                if (list_empty(&adapter->rwi_list))
                        adapter->state = VNIC_CLOSED;
                else
                        adapter->state = reset_state;

                return 0;
        }

        /* kick napi */
        for (i = 0; i < adapter->req_rx_queues; i++)
                napi_schedule(&adapter->napi[i]);

        if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
            adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
                call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);

        netif_carrier_on(netdev);

        return 0;
}

static int do_hard_reset(struct ibmvnic_adapter *adapter,
                         struct ibmvnic_rwi *rwi, u32 reset_state)
{
        struct net_device *netdev = adapter->netdev;
        int rc;

        netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
                   rwi->reset_reason);

        netif_carrier_off(netdev);
        adapter->reset_reason = rwi->reset_reason;

        ibmvnic_cleanup(netdev);
        release_resources(adapter);
        release_sub_crqs(adapter, 0);
        release_crq_queue(adapter);

        /* remove the closed state so when we call open it appears
         * we are coming from the probed state.
         */
        adapter->state = VNIC_PROBED;

        reinit_completion(&adapter->init_done);
        rc = init_crq_queue(adapter);
        if (rc) {
                netdev_err(adapter->netdev,
                           "Couldn't initialize crq. rc=%d\n", rc);
                return rc;
        }

        rc = ibmvnic_init(adapter);
        if (rc)
                return rc;

        /* If the adapter was in PROBE state prior to the reset,
         * exit here.
         */
        if (reset_state == VNIC_PROBED)
                return 0;

        rc = ibmvnic_login(netdev);
        if (rc) {
                adapter->state = VNIC_PROBED;
                return 0;
        }

        rc = init_resources(adapter);
        if (rc)
                return rc;

        ibmvnic_disable_irqs(adapter);
        adapter->state = VNIC_CLOSED;

        if (reset_state == VNIC_CLOSED)
                return 0;

        rc = __ibmvnic_open(netdev);
        if (rc) {
                if (list_empty(&adapter->rwi_list))
                        adapter->state = VNIC_CLOSED;
                else
                        adapter->state = reset_state;

                return 0;
        }

        netif_carrier_on(netdev);

        return 0;
}

static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_rwi *rwi;
        unsigned long flags;

        spin_lock_irqsave(&adapter->rwi_lock, flags);

        if (!list_empty(&adapter->rwi_list)) {
                rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
                                       list);
                list_del(&rwi->list);
        } else {
                rwi = NULL;
        }

        spin_unlock_irqrestore(&adapter->rwi_lock, flags);
        return rwi;
}

static void free_all_rwi(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_rwi *rwi;

        rwi = get_next_rwi(adapter);
        while (rwi) {
                kfree(rwi);
                rwi = get_next_rwi(adapter);
        }
}

static void __ibmvnic_reset(struct work_struct *work)
{
        struct ibmvnic_rwi *rwi;
        struct ibmvnic_adapter *adapter;
        struct net_device *netdev;
        bool we_lock_rtnl = false;
        u32 reset_state;
        int rc = 0;

        adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
        netdev = adapter->netdev;

        /* netif_set_real_num_xx_queues needs to take rtnl lock here
         * unless wait_for_reset is set, in which case the rtnl lock
         * has already been taken before initializing the reset
         */
        if (!adapter->wait_for_reset) {
                rtnl_lock();
                we_lock_rtnl = true;
        }
        reset_state = adapter->state;

        rwi = get_next_rwi(adapter);
        while (rwi) {
                if (adapter->force_reset_recovery) {
                        adapter->force_reset_recovery = false;
                        rc = do_hard_reset(adapter, rwi, reset_state);
                } else {
                        rc = do_reset(adapter, rwi, reset_state);
                }
                kfree(rwi);
                if (rc && rc != IBMVNIC_INIT_FAILED &&
                    !adapter->force_reset_recovery)
                        break;

                rwi = get_next_rwi(adapter);
        }

        if (adapter->wait_for_reset) {
                adapter->wait_for_reset = false;
                adapter->reset_done_rc = rc;
                complete(&adapter->reset_done);
        }

        if (rc) {
                netdev_dbg(adapter->netdev, "Reset failed\n");
                free_all_rwi(adapter);
        }

        adapter->resetting = false;
        if (we_lock_rtnl)
                rtnl_unlock();
}

static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
                         enum ibmvnic_reset_reason reason)
{
        struct list_head *entry, *tmp_entry;
        struct ibmvnic_rwi *rwi, *tmp;
        struct net_device *netdev = adapter->netdev;
        unsigned long flags;
        int ret;

        if (adapter->state == VNIC_REMOVING ||
            adapter->state == VNIC_REMOVED ||
            adapter->failover_pending) {
                ret = EBUSY;
                netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
                goto err;
        }

        if (adapter->state == VNIC_PROBING) {
                netdev_warn(netdev, "Adapter reset during probe\n");
                ret = adapter->init_done_rc = EAGAIN;
                goto err;
        }

        spin_lock_irqsave(&adapter->rwi_lock, flags);

        list_for_each(entry, &adapter->rwi_list) {
                tmp = list_entry(entry, struct ibmvnic_rwi, list);
                if (tmp->reset_reason == reason) {
                        netdev_dbg(netdev, "Skipping matching reset\n");
                        spin_unlock_irqrestore(&adapter->rwi_lock, flags);
                        ret = EBUSY;
                        goto err;
                }
        }

        rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
        if (!rwi) {
                spin_unlock_irqrestore(&adapter->rwi_lock, flags);
                ibmvnic_close(netdev);
                ret = ENOMEM;
                goto err;
        }
        /* if we just received a transport event,
         * flush reset queue and process this reset
         */
        if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
                list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
                        list_del(entry);
        }
        rwi->reset_reason = reason;
        list_add_tail(&rwi->list, &adapter->rwi_list);
        spin_unlock_irqrestore(&adapter->rwi_lock, flags);
        adapter->resetting = true;
        netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
        schedule_work(&adapter->ibmvnic_reset);

        return 0;
err:
        if (adapter->wait_for_reset)
                adapter->wait_for_reset = false;
        return -ret;
}

static void ibmvnic_tx_timeout(struct net_device *dev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(dev);

        ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
                                  struct ibmvnic_rx_buff *rx_buff)
{
        struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

        rx_buff->skb = NULL;

        pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
        pool->next_alloc = (pool->next_alloc + 1) % pool->size;

        atomic_dec(&pool->available);
}

static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
        struct net_device *netdev = napi->dev;
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int scrq_num = (int)(napi - adapter->napi);
        int frames_processed = 0;

restart_poll:
        while (frames_processed < budget) {
                struct sk_buff *skb;
                struct ibmvnic_rx_buff *rx_buff;
                union sub_crq *next;
                int length;
                u16 offset;
                u8 flags = 0;

                if (unlikely(adapter->resetting &&
                             adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
                        enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
                        napi_complete_done(napi, frames_processed);
                        return frames_processed;
                }

                if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
                        break;
                next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
                rx_buff =
                    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
                                                          rx_comp.correlator);
                /* do error checking */
                if (next->rx_comp.rc) {
                        netdev_dbg(netdev, "rx buffer returned with rc %x\n",
                                   be16_to_cpu(next->rx_comp.rc));
                        /* free the entry */
                        next->rx_comp.first = 0;
                        dev_kfree_skb_any(rx_buff->skb);
                        remove_buff_from_pool(adapter, rx_buff);
                        continue;
                } else if (!rx_buff->skb) {
                        /* free the entry */
                        next->rx_comp.first = 0;
                        remove_buff_from_pool(adapter, rx_buff);
                        continue;
                }

                length = be32_to_cpu(next->rx_comp.len);
                offset = be16_to_cpu(next->rx_comp.off_frame_data);
                flags = next->rx_comp.flags;
                skb = rx_buff->skb;
                skb_copy_to_linear_data(skb, rx_buff->data + offset,
                                        length);

                /* VLAN Header has been stripped by the system firmware and
                 * needs to be inserted by the driver
                 */
                if (adapter->rx_vlan_header_insertion &&
                    (flags & IBMVNIC_VLAN_STRIPPED))
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                               ntohs(next->rx_comp.vlan_tci));

                /* free the entry */
                next->rx_comp.first = 0;
                remove_buff_from_pool(adapter, rx_buff);

                skb_put(skb, length);
                skb->protocol = eth_type_trans(skb, netdev);
                skb_record_rx_queue(skb, scrq_num);

                if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
                    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                }

                length = skb->len;
                napi_gro_receive(napi, skb); /* send it up */
                netdev->stats.rx_packets++;
                netdev->stats.rx_bytes += length;
                adapter->rx_stats_buffers[scrq_num].packets++;
                adapter->rx_stats_buffers[scrq_num].bytes += length;
                frames_processed++;
        }

        if (adapter->state != VNIC_CLOSING)
                replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

        if (frames_processed < budget) {
                enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
                napi_complete_done(napi, frames_processed);
                if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
                    napi_reschedule(napi)) {
                        disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
                        goto restart_poll;
                }
        }
        return frames_processed;
}

static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
        int rc, ret;

        adapter->fallback.mtu = adapter->req_mtu;
        adapter->fallback.rx_queues = adapter->req_rx_queues;
        adapter->fallback.tx_queues = adapter->req_tx_queues;
        adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
        adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;

        init_completion(&adapter->reset_done);
        adapter->wait_for_reset = true;
        rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
        if (rc)
                return rc;
        wait_for_completion(&adapter->reset_done);

        ret = 0;
        if (adapter->reset_done_rc) {
                ret = -EIO;
                adapter->desired.mtu = adapter->fallback.mtu;
                adapter->desired.rx_queues = adapter->fallback.rx_queues;
                adapter->desired.tx_queues = adapter->fallback.tx_queues;
                adapter->desired.rx_entries = adapter->fallback.rx_entries;
                adapter->desired.tx_entries = adapter->fallback.tx_entries;

                init_completion(&adapter->reset_done);
                adapter->wait_for_reset = true;
                rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
                if (rc)
                        return ret;
                wait_for_completion(&adapter->reset_done);
        }
        adapter->wait_for_reset = false;

        return ret;
}
static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->desired.mtu = new_mtu + ETH_HLEN;

	return wait_for_reset(adapter);
}
static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	/* Some backing hardware adapters can not
	 * handle packets with a MSS less than 224
	 * or with only one segment.
	 */
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_size < 224 ||
		    skb_shinfo(skb)->gso_segs == 1)
			features &= ~NETIF_F_GSO_MASK;
	}

	return features;
}
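
/* net_device_ops callbacks registered for the vNIC interface */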
static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
	.ndo_change_mtu		= ibmvnic_change_mtu,
	.ndo_features_check	= ibmvnic_features_check,
};
/* ethtool functions */

static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	u32 supported, advertising;

	supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
		     SUPPORTED_FIBRE);
	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
		       ADVERTISED_FIBRE);
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
static void ibmvnic_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *info)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, adapter->fw_version,
		sizeof(info->fw_version));
}
static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}
static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
		ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
		ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
	} else {
		ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
		ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
	}
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
	ring->tx_pending = adapter->req_tx_entries_per_subcrq;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
static int ibmvnic_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int ret;

	adapter->desired.rx_entries = ring->rx_pending;
	adapter->desired.tx_entries = ring->tx_pending;

	ret = wait_for_reset(adapter);

	if (!ret &&
	    (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
	     adapter->req_tx_entries_per_subcrq != ring->tx_pending))
		netdev_info(netdev,
			    "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
			    ring->rx_pending, ring->tx_pending,
			    adapter->req_rx_add_entries_per_subcrq,
			    adapter->req_tx_entries_per_subcrq);
	return ret;
}
static void ibmvnic_get_channels(struct net_device *netdev,
				 struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
		channels->max_rx = adapter->max_rx_queues;
		channels->max_tx = adapter->max_tx_queues;
	} else {
		channels->max_rx = IBMVNIC_MAX_QUEUES;
		channels->max_tx = IBMVNIC_MAX_QUEUES;
	}

	channels->max_other = 0;
	channels->max_combined = 0;
	channels->rx_count = adapter->req_rx_queues;
	channels->tx_count = adapter->req_tx_queues;
	channels->other_count = 0;
	channels->combined_count = 0;
}
static int ibmvnic_set_channels(struct net_device *netdev,
				struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int ret;

	adapter->desired.rx_queues = channels->rx_count;
	adapter->desired.tx_queues = channels->tx_count;

	ret = wait_for_reset(adapter);

	if (!ret &&
	    (adapter->req_rx_queues != channels->rx_count ||
	     adapter->req_tx_queues != channels->tx_count))
		netdev_info(netdev,
			    "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
			    channels->rx_count, channels->tx_count,
			    adapter->req_rx_queues, adapter->req_tx_queues);
	return ret;
}
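
/* ethtool statistics are laid out as the global ibmvnic_stats array
 * followed by three counters per TX queue and three per RX queue; the
 * string table built below must match the ordering used in
 * ibmvnic_get_ethtool_stats() and the count reported by
 * ibmvnic_get_sset_count().
 */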
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
				i++, data += ETH_GSTRING_LEN)
			memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);

		for (i = 0; i < adapter->req_tx_queues; i++) {
			snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN,
				 "tx%d_dropped_packets", i);
			data += ETH_GSTRING_LEN;
		}

		for (i = 0; i < adapter->req_rx_queues; i++) {
			snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
			data += ETH_GSTRING_LEN;
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
			strcpy(data + i * ETH_GSTRING_LEN,
			       ibmvnic_priv_flags[i]);
		break;
	default:
		return;
	}
}
static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats) +
		       adapter->req_tx_queues * NUM_TX_STATS +
		       adapter->req_rx_queues * NUM_RX_STATS;
	case ETH_SS_PRIV_FLAGS:
		return ARRAY_SIZE(ibmvnic_priv_flags);
	default:
		return -EOPNOTSUPP;
	}
}
static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i, j;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	init_completion(&adapter->stats_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return;
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
						ibmvnic_stats[i].offset));

	for (j = 0; j < adapter->req_tx_queues; j++) {
		data[i] = adapter->tx_stats_buffers[j].packets;
		i++;
		data[i] = adapter->tx_stats_buffers[j].bytes;
		i++;
		data[i] = adapter->tx_stats_buffers[j].dropped_packets;
		i++;
	}

	for (j = 0; j < adapter->req_rx_queues; j++) {
		data[i] = adapter->rx_stats_buffers[j].packets;
		i++;
		data[i] = adapter->rx_stats_buffers[j].bytes;
		i++;
		data[i] = adapter->rx_stats_buffers[j].interrupts;
		i++;
	}
}
static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->priv_flags;
}

static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);

	if (which_maxes)
		adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
	else
		adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;

	return 0;
}
static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.set_ringparam		= ibmvnic_set_ringparam,
	.get_channels		= ibmvnic_get_channels,
	.set_channels		= ibmvnic_set_channels,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
	.get_link_ksettings	= ibmvnic_get_link_ksettings,
	.get_priv_flags		= ibmvnic_get_priv_flags,
	.set_priv_flags		= ibmvnic_set_priv_flags,
};
/* Routines for managing CRQs/sCRQs  */

static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
				   struct ibmvnic_sub_crq_queue *scrq)
{
	int rc;

	if (scrq->irq) {
		free_irq(scrq->irq, scrq);
		irq_dispose_mapping(scrq->irq);
		scrq->irq = 0;
	}

	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
	atomic_set(&scrq->used, 0);
	scrq->cur = 0;

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
	return rc;
}
static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
{
	int i, rc;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		if (rc)
			return rc;
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		if (rc)
			return rc;
	}

	return rc;
}
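
/* Free a sub-CRQ: optionally release it in the hypervisor first (retrying
 * while the hcall reports busy), then unmap and free the four long-term
 * mapped queue pages.
 */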
static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq,
				  bool do_h_free)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	if (do_h_free) {
		/* Close the sub-crqs */
		do {
			rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
						adapter->vdev->unit_address,
						scrq->crq_num);
		} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

		if (rc) {
			netdev_err(adapter->netdev,
				   "Failed to release sub-CRQ %16lx, rc = %ld\n",
				   scrq->crq_num, rc);
		}
	}

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}
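
/* Allocate and register one sub-CRQ. The message queue is four contiguous
 * pages (an order-2 allocation), DMA mapped for the life of the queue and
 * registered with the hypervisor via h_reg_sub_crq(), which returns the
 * queue number and the hardware interrupt source.
 */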
static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
	if (!scrq)
		return NULL;

	scrq->msgs =
		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}
static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
			if (!adapter->tx_scrq[i])
				continue;

			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
				   i);
			if (adapter->tx_scrq[i]->irq) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				adapter->tx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->tx_scrq[i],
					      do_h_free);
		}

		kfree(adapter->tx_scrq);
		adapter->tx_scrq = NULL;
		adapter->num_active_tx_scrqs = 0;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
			if (!adapter->rx_scrq[i])
				continue;

			netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
				   i);
			if (adapter->rx_scrq[i]->irq) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				adapter->rx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->rx_scrq[i],
					      do_h_free);
		}

		kfree(adapter->rx_scrq);
		adapter->rx_scrq = NULL;
		adapter->num_active_rx_scrqs = 0;
	}
}
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}
static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	if (adapter->resetting &&
	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
		u64 val = (0xff000000) | scrq->hw_irq;

		rc = plpar_hcall_norets(H_EOI, val);
		if (rc)
			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
				val, rc);
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}
static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 *first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;
		int num_entries = 0;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			if (index & IBMVNIC_TSO_POOL_MASK) {
				tx_pool = &adapter->tso_pool[pool];
				index &= ~IBMVNIC_TSO_POOL_MASK;
			} else {
				tx_pool = &adapter->tx_pool[pool];
			}

			txbuff = &tx_pool->tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
			}
			/* if sub_crq was sent indirectly */
			first = &txbuff->indir_arr[0].generic.first;
			if (*first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
				*first = 0;
			}

			if (txbuff->last_frag) {
				dev_kfree_skb_any(txbuff->skb);
				txbuff->skb = NULL;
			}

			num_entries += txbuff->num_entries;

			tx_pool->free_map[tx_pool->producer_index] = index;
			tx_pool->producer_index =
				(tx_pool->producer_index + 1) %
					tx_pool->num_buffers;
		}
		/* remove tx_comp scrq*/
		next->tx_comp.first = 0;

		if (atomic_sub_return(num_entries, &scrq->used) <=
		    (adapter->req_tx_entries_per_subcrq / 2) &&
		    __netif_subqueue_stopped(adapter->netdev,
					     scrq->pool_index)) {
			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
			netdev_dbg(adapter->netdev, "Started queue %d\n",
				   scrq->pool_index);
		}
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}
static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	/* When booting a kdump kernel we can hit pending interrupts
	 * prior to completing driver initialization.
	 */
	if (unlikely(adapter->state != VNIC_OPEN))
		return IRQ_NONE;

	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}
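
/* Map and request an interrupt for every TX and RX sub-CRQ. On failure
 * the already-registered interrupts are torn down in reverse order and
 * all sub-CRQs are released.
 */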
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
			   i);
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
			   i);
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter, 1);
	return rc;
}
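
/* Allocate all requested sub-CRQs in one array, then split it between TX
 * and RX. If fewer queues could be registered than requested (but at
 * least the minimums), the shortfall is distributed by alternately
 * decrementing the RX and TX request counts.
 */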
static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	int total_queues;
	int more = 0;
	int i;

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
	if (!allqueues)
		return -1;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues*/
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
		adapter->num_active_tx_scrqs++;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
		adapter->num_active_rx_scrqs++;
	}

	kfree(allqueues);
	return 0;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i], 1);
	kfree(allqueues);
	return -1;
}
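
/* Send REQUEST_CAPABILITY CRQs for every negotiated value. On the first
 * attempt (!retry) the request values are derived from the desired
 * values, clamped so that one long term buffer (ring size times buffer
 * size) never exceeds IBMVNIC_MAX_LTB_SIZE; on retry the previously
 * adjusted values are re-sent unchanged.
 */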
static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int max_entries;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			return;
		}

		if (adapter->desired.mtu)
			adapter->req_mtu = adapter->desired.mtu;
		else
			adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;

		if (!adapter->desired.tx_entries)
			adapter->desired.tx_entries =
					adapter->max_tx_entries_per_subcrq;
		if (!adapter->desired.rx_entries)
			adapter->desired.rx_entries =
					adapter->max_rx_add_entries_per_subcrq;

		max_entries = IBMVNIC_MAX_LTB_SIZE /
			      (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);

		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
			adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
			adapter->desired.tx_entries = max_entries;
		}

		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
			adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
			adapter->desired.rx_entries = max_entries;
		}

		if (adapter->desired.tx_entries)
			adapter->req_tx_entries_per_subcrq =
					adapter->desired.tx_entries;
		else
			adapter->req_tx_entries_per_subcrq =
					adapter->max_tx_entries_per_subcrq;

		if (adapter->desired.rx_entries)
			adapter->req_rx_add_entries_per_subcrq =
					adapter->desired.rx_entries;
		else
			adapter->req_rx_add_entries_per_subcrq =
					adapter->max_rx_add_entries_per_subcrq;

		if (adapter->desired.tx_queues)
			adapter->req_tx_queues =
					adapter->desired.tx_queues;
		else
			adapter->req_tx_queues =
					adapter->opt_tx_comp_sub_queues;

		if (adapter->desired.rx_queues)
			adapter->req_rx_queues =
					adapter->desired.rx_queues;
		else
			adapter->req_rx_queues =
					adapter->opt_rx_comp_queues;

		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			atomic_inc(&adapter->running_cap_crqs);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}
}
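
/* Queue entries are consumed in place: an entry is valid while its first
 * byte has IBMVNIC_CRQ_CMD_RSP set, and is handed back to the producer by
 * clearing that byte after it has been processed.
 */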
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
		return 1;
	else
		return 0;
}
*ibmvnic_next_scrq(struct ibmvnic_adapter
*adapter
,
3197 struct ibmvnic_sub_crq_queue
*scrq
)
3199 union sub_crq
*entry
;
3200 unsigned long flags
;
3202 spin_lock_irqsave(&scrq
->lock
, flags
);
3203 entry
= &scrq
->msgs
[scrq
->cur
];
3204 if (entry
->generic
.first
& IBMVNIC_CRQ_CMD_RSP
) {
3205 if (++scrq
->cur
== scrq
->size
)
3210 spin_unlock_irqrestore(&scrq
->lock
, flags
);
static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}
static void print_subcrq_error(struct device *dev, int rc, const char *func)
{
	switch (rc) {
	case H_PARAMETER:
		dev_warn_ratelimited(dev,
				     "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
				     func, rc);
		break;
	case H_CLOSED:
		dev_warn_ratelimited(dev,
				     "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
				     func, rc);
		break;
	default:
		dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
		break;
	}
}
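
/* Hand one four-cell sub-CRQ descriptor to the hypervisor. The mb()
 * before the hcall ensures the descriptor contents are globally visible
 * before the send is issued.
 */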
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc)
		print_subcrq_error(dev, rc, __func__);

	return rc;
}
static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc)
		print_subcrq_error(dev, rc, __func__);

	return rc;
}
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	if (!adapter->crq.active &&
	    crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
		dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
		return -EINVAL;
	}

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED) {
			dev_warn(dev, "CRQ Queue closed\n");
			if (adapter->resetting)
				ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}

		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}
struct vnic_login_client_data {
	u8	type;
	__be16	len;
	char	name[];
} __packed;

static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
{
	int len;

	/* Calculate the amount of buffer space needed for the
	 * vnic client data in the login buffer. There are four entries,
	 * OS name, LPAR name, device name, and a null last entry.
	 */
	len = 4 * sizeof(struct vnic_login_client_data);
	len += 6; /* "Linux" plus NULL */
	len += strlen(utsname()->nodename) + 1;
	len += strlen(adapter->netdev->name) + 1;

	return len;
}
static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
				 struct vnic_login_client_data *vlcd)
{
	const char *os_name = "Linux";
	int len;

	/* Type 1 - LPAR OS */
	vlcd->type = 1;
	len = strlen(os_name) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, os_name, len);
	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);

	/* Type 2 - LPAR name */
	vlcd->type = 2;
	len = strlen(utsname()->nodename) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, utsname()->nodename, len);
	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);

	/* Type 3 - device name */
	vlcd->type = 3;
	len = strlen(adapter->netdev->name) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, adapter->netdev->name, len);
}
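
/* Build and send the LOGIN request. The login buffer carries the TX/RX
 * sub-CRQ numbers followed by the client data blob built by
 * vnic_add_client_data(); the response buffer is mapped up front so the
 * server can DMA the login response into it.
 */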
static int send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int client_data_len;
	struct vnic_login_client_data *vlcd;
	int i;

	if (!adapter->tx_scrq || !adapter->rx_scrq) {
		netdev_err(adapter->netdev,
			   "RX or TX queues are not allocated, device login failed\n");
		return -1;
	}

	release_login_rsp_buffer(adapter);
	client_data_len = vnic_client_data_len(adapter);

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
	    client_data_len;

	login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}

	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	/* Insert vNIC login client data */
	vlcd = (struct vnic_login_client_data *)
		((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
	login_buffer->client_data_offset =
			cpu_to_be32((char *)vlcd - (char *)login_buffer);
	login_buffer->client_data_len = cpu_to_be32(client_data_len);

	vnic_add_client_data(adapter, vlcd);

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);
	ibmvnic_send_crq(adapter, &crq);

	return 0;

buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return -1;
}
static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			    u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	return ibmvnic_send_crq(adapter, &crq);
}

static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	return ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}
/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_crqs, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}
static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
				struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (crq->get_vpd_size_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
			crq->get_vpd_size_rsp.rc.code);
		complete(&adapter->fw_done);
		return;
	}

	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
	complete(&adapter->fw_done);
}
static void handle_vpd_rsp(union ibmvnic_crq *crq,
			   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned char *substr = NULL;
	u8 fw_level_len = 0;

	memset(adapter->fw_version, 0, 32);

	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
			 DMA_FROM_DEVICE);

	if (crq->get_vpd_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
			crq->get_vpd_rsp.rc.code);
		goto complete;
	}

	/* get the position of the firmware version info
	 * located after the ASCII 'RM' substring in the buffer
	 */
	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
	if (!substr) {
		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
		goto complete;
	}

	/* get length of firmware level ASCII substring */
	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
		fw_level_len = *(substr + 2);
	} else {
		dev_info(dev, "Length of FW substr extrapolated VDP buff\n");
		goto complete;
	}

	/* copy firmware version string from vpd into adapter */
	if ((substr + 3 + fw_level_len) <
	    (adapter->vpd->buff + adapter->vpd->len)) {
		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
	} else {
		dev_info(dev, "FW substr extrapolated VPD buff\n");
	}

complete:
	if (adapter->fw_version[0] == '\0')
		strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
	complete(&adapter->fw_done);
}
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	netdev_features_t old_hw_features = 0;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
	    dma_map_single(dev, &adapter->ip_offload_ctrl,
			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
	adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
	adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
	adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;

	/* large_rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	if (adapter->state != VNIC_PROBING) {
		old_hw_features = adapter->netdev->hw_features;
		adapter->netdev->hw_features = 0;
	}

	adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->hw_features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->hw_features |= NETIF_F_RXCSUM;

	if (buf->large_tx_ipv4)
		adapter->netdev->hw_features |= NETIF_F_TSO;
	if (buf->large_tx_ipv6)
		adapter->netdev->hw_features |= NETIF_F_TSO6;

	if (adapter->state == VNIC_PROBING) {
		adapter->netdev->features |= adapter->netdev->hw_features;
	} else if (old_hw_features != adapter->netdev->hw_features) {
		netdev_features_t tmp = 0;

		/* disable features no longer supported */
		adapter->netdev->features &= adapter->netdev->hw_features;
		/* turn on features now supported if previously enabled */
		tmp = (old_hw_features ^ adapter->netdev->hw_features) &
			adapter->netdev->hw_features;
		adapter->netdev->features |=
				tmp & adapter->netdev->wanted_features;
	}

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}
static const char *ibmvnic_fw_err_cause(u16 cause)
{
	switch (cause) {
	case ADAPTER_PROBLEM:
		return "adapter problem";
	case BUS_PROBLEM:
		return "bus problem";
	case FW_PROBLEM:
		return "firmware problem";
	case DD_PROBLEM:
		return "device driver problem";
	case EEH_RECOVERY:
		return "EEH recovery";
	case FW_UPDATED:
		return "firmware updated";
	case LOW_MEMORY:
		return "low Memory";
	default:
		return "unknown";
	}
}
static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u16 cause;

	cause = be16_to_cpu(crq->error_indication.error_cause);

	dev_warn_ratelimited(dev,
			     "Firmware reports %serror, cause: %s. Starting recovery...\n",
			     crq->error_indication.flags
				& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
			     ibmvnic_fw_err_cause(cause));

	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
	else
		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}
static int handle_change_mac_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		goto out;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
out:
	complete(&adapter->fw_done);
	return rc;
}
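
/* Handle a REQUEST_CAPABILITY response. PARTIALSUCCESS means the server
 * granted a different value than requested; the granted value (or the
 * fallback MTU) is adopted and the full set of requests is re-sent. Once
 * the last outstanding capability response arrives, the IP offload query
 * is kicked off.
 */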
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);

		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
		    REQ_MTU) {
			pr_err("mtu of %llu is not supported. Reverting.\n",
			       *req_value);
			*req_value = adapter->fallback.mtu;
		} else {
			*req_value =
				be64_to_cpu(crq->request_capability_rsp.number);
		}

		ibmvnic_send_req_caps(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
		    &adapter->ip_offload_buf;

		adapter->wait_capability = false;
		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
		    cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
		complete(&adapter->init_done);
		return 0;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	release_login_buffer(adapter);
	complete(&adapter->init_done);

	return 0;
}
static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}
static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}
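
/* Record one QUERY_CAPABILITY response into the matching adapter field,
 * so that once the last outstanding query completes the driver can move
 * on to requesting the values it actually wants (ibmvnic_send_req_caps()).
 */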
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		ibmvnic_send_req_caps(adapter, 0);
	}
}

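/* Top-level CRQ dispatcher, called from the CRQ tasklet with the queue
 * lock held. gen_crq->first identifies the message class (init handshake,
 * transport event, or command response); command responses are then
 * demultiplexed on gen_crq->cmd to the per-command handlers above.
 */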
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			adapter->failover_pending = false;
			if (!completion_done(&adapter->init_done)) {
				complete(&adapter->init_done);
				adapter->init_done_rc = -EIO;
			}
			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		if (adapter->resetting)
			adapter->force_reset_recovery = true;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

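/* The hard-irq handler does no CRQ processing itself; it only schedules
 * the tasklet below, which drains and handles queued messages outside
 * hard-irq context.
 */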
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}

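/* The CRQ management hcalls may return busy indications while the
 * hypervisor completes the operation, so each call site below retries in
 * a do/while loop until a definitive return code is seen, e.g.:
 *
 *	do {
 *		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
 *	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
 */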
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}

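/* One-time CRQ setup: allocate a zeroed page for the message queue, DMA
 * map it once for the adapter's lifetime, register it with the hypervisor
 * via H_REG_CRQ, then wire up the tasklet and the VIO interrupt. Failure
 * paths unwind in reverse order through the labels at the bottom of the
 * function.
 */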
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}

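/* Reset-time (re)initialization: re-run the CRQ init handshake and then
 * rebuild the sub-CRQs. If the reset changed the requested queue counts,
 * the sub-CRQs are released and re-created; otherwise they are reset in
 * place, avoiding a full reallocation.
 */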
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	u64 old_num_rx_queues, old_num_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;

	reinit_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (adapter->resetting && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	adapter->from_passive_init = false;

	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static struct device_attribute dev_attr_failover;

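/* Probe path: fetch the MAC address from the device-tree attribute,
 * allocate the netdev, then set up the CRQ and run the init handshake.
 * The loop retries while ibmvnic_init() reports EAGAIN, which the
 * handshake appears to use to request a full restart of CRQ setup (an
 * inference from the loop structure, not documented here).
 */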
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	init_completion(&adapter->init_done);
	adapter->resetting = false;

	adapter->mac_change_pending = false;

	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_init(adapter);
		if (rc && rc != EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == EAGAIN);

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;

	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	free_netdev(netdev);

	return rc;
}

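/* Teardown mirrors probe in reverse: unregister the netdev, release the
 * sub-CRQs, the CRQ, and the statistics buffers, then drop the sysfs
 * attribute and free the netdev.
 */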
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->state = VNIC_REMOVING;
	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

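/* Writing "1" to the failover sysfs attribute forces a client-initiated
 * failover: the driver fetches a session token via H_VIOCTL with
 * H_GET_SESSION_TOKEN and then reports a session error with
 * H_SESSION_ERR_DETECTED, prompting the hypervisor to fail over to the
 * backing device. Illustrative usage (the exact device path varies by
 * platform and unit address):
 *
 *	echo 1 > /sys/devices/vio/30000003/failover
 */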
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR_WO(failover);

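/* Tell the VIO bus how much IOMMU/DMA space this device wants: one page
 * for the CRQ, an IOMMU-aligned statistics buffer, four pages per sub-CRQ,
 * plus every long-term-mapped rx pool buffer. Before probe has populated
 * the netdev, a fixed default entitlement is returned instead.
 */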
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table	= ibmvnic_device_table,
	.probe		= ibmvnic_probe,
	.remove		= ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);