// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *	      Frank Pavlic <fpavlic@de.ibm.com>,
 *	      Thomas Spatzier <tspat@de.ibm.com>,
 *	      Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
#include <net/sock.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	[QETH_DBF_SETUP] = {"qeth_setup",
			    8, 1, 8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]	 = {"qeth_control",
			    8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

static struct kmem_cache *qeth_core_header_cache;
static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct kmem_cache *qeth_qaob_cache;

static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);

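/* Map a card's type (and, for the short variant below, its link type) to a
 * human-readable name. VM NICs are reported separately from real hardware.
 */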
static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			break;
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			break;
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			break;
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			break;
		}
	}
	return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			      int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

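/* RX buffer pool helpers: init_pool owns the pool entries (each holding the
 * pages that back one inbound buffer), while in_buf_pool tracks the count
 * that the working pool is allowed to use.
 */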
static void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	struct qeth_qdio_q *queue = card->qdio.in_q;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list)
		list_del(&pool_entry->list);

	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
		queue->bufs[i].pool_entry = NULL;
}

static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
		if (entry->elements[i])
			__free_page(entry->elements[i]);
	}

	kfree(entry);
}

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
				 init_list) {
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
	struct qeth_buffer_pool_entry *entry;
	unsigned int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	for (i = 0; i < pages; i++) {
		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);

		if (!entry->elements[i]) {
			qeth_free_pool_entry(entry);
			return NULL;
		}
	}

	return entry;
}

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		struct qeth_buffer_pool_entry *entry;

		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}

		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
	}

	return 0;
}

int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
	struct qeth_buffer_pool_entry *entry, *tmp;
	int delta = count - pool->buf_count;
	LIST_HEAD(entries);

	QETH_CARD_TEXT(card, 2, "realcbp");

	/* Defer until pool is allocated: */
	if (list_empty(&pool->entry_list))
		goto out;

	/* Remove entries from the pool: */
	while (delta < 0) {
		entry = list_first_entry(&pool->entry_list,
					 struct qeth_buffer_pool_entry,
					 init_list);
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);

		delta++;
	}

	/* Allocate additional entries: */
	while (delta > 0) {
		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			list_for_each_entry_safe(entry, tmp, &entries,
						 init_list) {
				list_del(&entry->init_list);
				qeth_free_pool_entry(entry);
			}

			return -ENOMEM;
		}

		list_add(&entry->init_list, &entries);

		delta--;
	}

	list_splice(&entries, &pool->entry_list);

out:
	card->qdio.in_buf_pool.buf_count = count;
	pool->buf_count = count;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);

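/* Allocate resp. free a qdio queue together with its
 * QDIO_MAX_BUFFERS_PER_Q qdio buffers.
 */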
static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 1, 0, 127,
			     NULL);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			goto out;
		}
	}

	rc = 0;
out:
	return rc;
}

static int qeth_alloc_cq(struct qeth_card *card)
{
	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			dev_err(&card->gdev->dev, "Failed to create completion queue\n");
			return -ENOMEM;
		}
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		card->qdio.c_q = NULL;
	}

	return 0;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
}

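/* Map the SBALF 15 error code of a TX completion (and whether it arrived
 * delayed via the CQ) to an AF_IUCV TX notification.
 */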
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

static void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}

static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}

static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (!rc) {
		channel->active_cmd = iob;
	} else {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		qeth_unlock_channel(card, channel);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
	}
	return rc;
}

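/* Like __qeth_issue_next_read(), but takes the ccwdev lock of the READ
 * channel around the call.
 */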
static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list_entry, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list_entry);
	spin_unlock_irq(&card->lock);
}

static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}

static void qeth_flush_local_addrs4(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs4_lock);
	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs6_lock);
	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs6_lock);
}

static void qeth_flush_local_addrs(struct qeth_card *card)
{
	qeth_flush_local_addrs4(card);
	qeth_flush_local_addrs6(card);
}

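/* Cache the local addresses that the adapter announces via
 * REGISTER LOCAL ADDR events; IPv4 entries are stored in the last word
 * of an in6_addr. TX uses this cache to spot next hops that are local
 * to the adapter.
 */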
static void qeth_add_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_add_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		addr->addr = cmd->addrs[i].addr;
		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs6_lock);
}

static void qeth_del_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
		unsigned int key = ipv4_addr_hash(addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
			if (tmp->addr.s6_addr32[3] == addr->addr) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_del_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
		u32 key = ipv6_addr_hash(&addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs6_lock);
}

static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	bool is_local = false;
	unsigned int key;
	__be32 next_hop;

	if (hash_empty(card->local_addrs4))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v4_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
	key = ipv4_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
		if (tmp->addr.s6_addr32[3] == next_hop) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	struct in6_addr *next_hop;
	bool is_local = false;
	u32 key;

	if (hash_empty(card->local_addrs6))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v6_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
	key = ipv6_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
	struct qeth_card *card = m->private;
	struct qeth_local_addr *tmp;
	unsigned int i;

	rcu_read_lock();
	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
		seq_printf(m, "%pI6c\n", &tmp->addr);
	rcu_read_unlock();

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);

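/* Log the result of an IPA command: loudly on error, at debug level 5
 * on success.
 */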
static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
			       struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}

static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n",
				netdev_name(card->dev));
			/* Set offline, then probably fail to set online: */
			qeth_schedule_recovery(card);
		} else {
			/* stay online for subsequent STARTLAN */
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 netdev_name(card->dev), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 netdev_name(card->dev), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list_entry)
		qeth_notify_cmd(iob, -ECANCELED);
	spin_unlock_irqrestore(&card->lock, flags);
}

static int qeth_check_idx_response(struct qeth_card *card,
				   unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
			dev_err(&card->gdev->dev,
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
		}
		return -EIO;
	}
	return 0;
}

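/* Default command completion: just drop the reference that was taken
 * when the command was issued.
 */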
static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
					      unsigned int length,
					      unsigned int ccws, long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}

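/* Completion callback for the long-running READ: check for IDX
 * termination, filter out unsolicited events, then match the reply
 * against the cmd_waiter_list and run the requestor's callback.
 */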
static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
		fallthrough;
	default:
		qeth_clear_ipacmd_list(card);
		goto err_idx;
	}

	cmd = __ipa_reply(iob);
	if (cmd) {
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) {
		if (tmp->match && tmp->match(tmp, iob)) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
	       QETH_PDU_HEADER_SEQ_NO(iob->data),
	       QETH_SEQ_NO_LENGTH);
	__qeth_issue_next_read(card);
err_idx:
	qeth_put_cmd(iob);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
				     unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread))
		rc = -EPERM;
	else if (card->thread_start_mask & thread)
		rc = -EBUSY;
	else
		card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);

	return rc;
}

static void qeth_clear_thread_start_bit(struct qeth_card *card,
					unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

static void qeth_clear_thread_running_bit(struct qeth_card *card,
					  unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}

int qeth_schedule_recovery(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "startrec");

	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		schedule_work(&card->kernel_thread_starter);

	return rc;
}

static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
			       16, 1, irb, 64, 1);
		return -EIO;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return -EIO;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return -EIO;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return -EIO;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return -EIO;
	}
	return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, "  rc???");
		return PTR_ERR(irb);
	}
}

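/* Interrupt handler for all three CCW channels of a card, called with
 * the ccwdev lock held.
 */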
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		     struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (intparm == 0) {
		/* unsolicited interrupt */
		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

		dev_err(&cdev->dev,
			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
	} else {
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
	}

	qeth_unlock_channel(card, channel);

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		if (iob)
			qeth_cancel_cmd(iob, rc);
		return;
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		channel->state = CH_STATE_STOPPED;
		wake_up(&card->wait_q);
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		channel->state = CH_STATE_HALTED;
		wake_up(&card->wait_q);
	}

	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			return;
		}
	}

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			return;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}
}

static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
			     struct qeth_qdio_out_buffer *buf,
			     enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		struct sock *sk = skb->sk;

		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (sk && sk->sk_family == PF_IUCV)
			iucv_sk(sk)->sk_txnotify(sk, notification);
	}
}

static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
				 struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct sk_buff *skb;

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	if (error) {
		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
	} else {
		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
	}

	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (!error) {
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
		atomic_dec(&queue->set_pci_flags_count);
		QETH_TXQ_STAT_INC(queue, completion_irq);
	}

	qeth_tx_complete_buf(queue, buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		void *data = phys_to_virt(buf->buffer->element[i].addr);

		if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
			kmem_cache_free(qeth_core_header_cache, data);
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->frames = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
{
	if (buf->aob)
		kmem_cache_free(qeth_qaob_cache, buf->aob);
	kmem_cache_free(qeth_qdio_outbuf_cache, buf);
}

static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
					  struct qeth_qdio_out_q *queue,
					  bool drain, int budget)
{
	struct qeth_qdio_out_buffer *buf, *tmp;

	list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
		struct qeth_qaob_priv1 *priv;
		struct qaob *aob = buf->aob;
		enum iucv_tx_notify notify;
		unsigned int i;

		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
			QETH_CARD_TEXT(card, 5, "fp");
			QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);

			notify = drain ? TX_NOTIFY_GENERALERROR :
					 qeth_compute_cq_notification(aob->aorc, 1);
			qeth_notify_skbs(queue, buf, notify);
			qeth_tx_complete_buf(queue, buf, drain, budget);

			for (i = 0;
			     i < aob->sb_count && i < queue->max_elements;
			     i++) {
				void *data = phys_to_virt(aob->sba[i]);

				if (test_bit(i, buf->from_kmem_cache) && data)
					kmem_cache_free(qeth_core_header_cache,
							data);
			}

			list_del(&buf->list_entry);
			qeth_free_out_buf(buf);
		}
	}
}

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	qeth_tx_complete_pending_bufs(q->card, q, true, 0);

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;

		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			qeth_free_out_buf(q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

static void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}

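/* Switch an OSA device between single-queue and multi-queue TX; any
 * established qdio queues must be freed so they get re-allocated with
 * the new count.
 */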
static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int max = single ? 1 : card->dev->num_tx_queues;

	if (card->qdio.no_out_queues == max)
		return;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.no_out_queues = max;
}

static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return 0;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
				QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	hash_init(card->rx_mode_addrs);
	hash_init(card->local_addrs4);
	hash_init(card->local_addrs6);
	spin_lock_init(&card->local_addrs4_lock);
	spin_lock_init(&card->local_addrs6_lock);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					      qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			   CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;

	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
					   qeth_debugfs_root);
	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
			    &qeth_debugfs_local_addr_fops);

	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

static int qeth_stop_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
	if (channel->active_cmd)
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);

	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	return rc;
}

static int qeth_start_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	channel->state = CH_STATE_DOWN;
	xchg(&channel->active_cmd, NULL);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	rc = ccw_device_set_online(cdev);
	if (rc)
		goto err;

	return 0;

err:
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
	return rc;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	return rc;
}

static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vmlayer");

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "err%x", rc);
	return disc;
}

/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

	if (IS_OSM(card))
		disc = QETH_DISCIPLINE_LAYER2;
	else if (IS_VM_NIC(card))
		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
				      qeth_vm_detect_layer(card);

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
		QETH_CARD_TEXT(card, 3, "force l2");
		break;
	case QETH_DISCIPLINE_LAYER3:
		QETH_CARD_TEXT(card, 3, "force l3");
		break;
	default:
		QETH_CARD_TEXT(card, 3, "force no");
	}

	return disc;
}

static void qeth_set_blkt_defaults(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "cfgblkt");

	if (card->info.use_v1_blkt) {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	}
}

static void qeth_idx_init(struct qeth_card *card)
{
	memset(&card->seqno, 0, sizeof(card->seqno));

	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}

static void qeth_idx_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}

static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	iob->callback = qeth_release_buffer_cb;
}

static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	/* MPC cmds are issued strictly in sequence. */
	return !IS_IPA(reply->data);
}

static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  const void *data,
						  unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
	iob->match = qeth_mpc_match_reply;
	return iob;
}

/**
 * qeth_send_control_data() -	send control command to the card
 * @card:			qeth_card structure pointer
 * @iob:			qeth_cmd_buffer pointer
 * @reply_cb:			callback function pointer
 *	cb_card:		pointer to the qeth_card structure
 *	cb_reply:		pointer to the qeth_reply structure
 *	cb_cmd:			pointer to the original iob for non-IPA
 *				commands, or to the qeth_ipa_cmd structure
 *				for the IPA commands.
 * @reply_param:		private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */
static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
{
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;
	int rc;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel, iob),
						   timeout);
	if (timeout <= 0) {
		qeth_put_cmd(iob);
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	}

	if (iob->finalize)
		iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));

	qeth_enqueue_cmd(card, iob);

	/* This pairs with iob->callback, and keeps the iob alive after IO: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
				      (addr_t) iob, 0, 0, timeout);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_dequeue_cmd(card, iob);
		qeth_put_cmd(iob);
		qeth_unlock_channel(card, channel);
		goto out;
	}

	timeout = wait_for_completion_interruptible_timeout(&iob->done,
							    timeout);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	qeth_dequeue_cmd(card, iob);

	if (reply_cb) {
		/* Wait until the callback for a late reply has completed: */
		spin_lock_irq(&iob->lock);
		if (rc)
			/* Zap any callback that's still pending: */
			iob->rc = rc;
		spin_unlock_irq(&iob->lock);
	}

	if (!rc)
		rc = iob->rc;

out:
	qeth_put_cmd(iob);
	return rc;
}

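/* Layout of the Read Configuration Data response as used below: nd1
 * supplies plant and tag data, nd3 the model digits that select the
 * v1 blkt defaults.
 */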
struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};

static void qeth_read_conf_data_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
	int rc = 0;
	u8 *tag;

	QETH_CARD_TEXT(card, 2, "cfgunit");

	if (data_length < sizeof(*nd)) {
		rc = -EINVAL;
		goto out;
	}

	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static int qeth_read_conf_data(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;
	struct ciw *ciw;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	if (ciw->count < sizeof(struct qeth_node_desc))
		return -EINVAL;

	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
		       iob->data);

	return qeth_send_control_data(card, iob, NULL, NULL);
}

static int qeth_idx_check_activate_response(struct qeth_card *card,
					    struct qeth_channel *channel,
					    struct qeth_cmd_buffer *iob)
{
	int rc;

	rc = qeth_check_idx_response(card, iob->data);
	if (rc)
		return rc;

	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
		return 0;

	/* negative reply: */
	QETH_CARD_TEXT_(card, 2, "idxneg%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));

	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
	case QETH_IDX_ACT_ERR_EXCL:
		dev_err(&channel->ccwdev->dev,
			"The adapter is used exclusively by another host\n");
		return -EBUSY;
	case QETH_IDX_ACT_ERR_AUTH:
	case QETH_IDX_ACT_ERR_AUTH_USER:
		dev_err(&channel->ccwdev->dev,
			"Setting the device online failed because of insufficient authorization\n");
		return -EPERM;
	default:
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
				 CCW_DEVID(channel->ccwdev));
		return -EIO;
	}
}

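/* Channel-specific completion callbacks for IDX ACTIVATE; both check
 * the response and verify the peer's function level before the channel
 * is brought up.
 */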
static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
					      struct qeth_cmd_buffer *iob,
					      unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxrdcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
		goto out;
	}

	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
					       struct qeth_cmd_buffer *iob,
					       unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((peer_level & ~0x0100) !=
	    qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
	}

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
					struct qeth_cmd_buffer *iob)
{
	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
	u8 port = ((u8)card->dev->dev_port) | 0x80;
	struct ccw1 *ccw = __ccw_from_cmd(iob);

	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
		       iob->data);
	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
	iob->finalize = qeth_idx_finalize_cmd;

	port |= QETH_IDX_ACT_INVAL_FRAME;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}

static int qeth_idx_activate_read_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->read;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxread");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_read_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_idx_activate_write_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->write;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrite");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_write_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenblcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_filter_r,
	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_enable(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenable");

	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);

	return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
}

static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
			    unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_connection_r,
	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_setup(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetup");

	iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
}

static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
{
	if (link_type == QETH_LINK_TYPE_LANE_TR ||
	    link_type == QETH_LINK_TYPE_HSTR) {
		dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
		return false;
	}

	return true;
}

static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
{
	struct net_device *dev = card->dev;
	unsigned int new_mtu;

	if (!max_mtu) {
		/* IQD needs accurate max MTU to set up its RX buffers: */
		if (IS_IQD(card))
			return -EINVAL;
		/* tolerate quirky HW: */
		max_mtu = ETH_MAX_MTU;
	}

	rtnl_lock();
	if (IS_IQD(card)) {
		/* move any device with default MTU to new max MTU: */
		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;

		/* adjust RX buffer size to new max MTU: */
		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
		if (dev->max_mtu && dev->max_mtu != max_mtu)
			qeth_free_qdio_queues(card);
	} else {
		if (dev->mtu)
			new_mtu = dev->mtu;
		/* default MTUs for first setup: */
		else if (IS_LAYER2(card))
			new_mtu = ETH_DATA_LEN;
		else
			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
	}

	dev->max_mtu = max_mtu;
	dev->mtu = min(new_mtu, max_mtu);
	rtnl_unlock();
	return 0;
}
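
/* Translate the HiperSockets frame-size code from the ULP ENABLE response
 * into an MTU value; unknown codes map to 0, which the caller rejects for
 * IQD devices.
 */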
static int qeth_get_mtu_outof_framesize(int framesize)
{
	switch (framesize) {
	case 0x4000:
		return 8192;
	case 0x6000:
		return 16384;
	case 0xa000:
		return 32768;
	case 0xffff:
		return 57344;
	default:
		return 0;
	}
}

static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	__u16 mtu, framesize;
	__u16 len;
	struct qeth_cmd_buffer *iob;
	u8 link_type = 0;

	QETH_CARD_TEXT(card, 2, "ulpenacb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_filter_r,
	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (IS_IQD(card)) {
		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
		mtu = qeth_get_mtu_outof_framesize(framesize);
	} else {
		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
	}
	*(u16 *)reply->param = mtu;

	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
		memcpy(&link_type,
		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
		if (!qeth_is_supported_link_type(card, link_type))
			return -EPROTONOSUPPORT;
	}

	card->info.link_type = link_type;
	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
	return 0;
}

static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
{
	return IS_LAYER2(card) ? QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3;
}

static int qeth_ulp_enable(struct qeth_card *card)
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
	struct qeth_cmd_buffer *iob;
	u16 max_mtu;
	int rc;

	QETH_CARD_TEXT(card, 2, "ulpenabl");

	iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
	rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
	if (rc)
		return rc;
	return qeth_update_max_mtu(card, max_mtu);
}

static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpstpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_connection_r,
	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
		     3)) {
		QETH_CARD_TEXT(card, 2, "olmlimit");
		dev_err(&card->gdev->dev, "A connection could not be "
			"established because of an OLM limit\n");
		return -EMLINK;
	}
	return 0;
}

static int qeth_ulp_setup(struct qeth_card *card)
{
	__u16 temp;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpsetup");

	iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);

	memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
	return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
}

static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx,
			      gfp_t gfp)
{
	struct qeth_qdio_out_buffer *newbuf;

	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp);
	if (!newbuf)
		return -ENOMEM;

	newbuf->buffer = q->qdio_bufs[bidx];
	skb_queue_head_init(&newbuf->skb_list);
	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
	q->bufs[bidx] = newbuf;
	return 0;
}

static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
{
	if (!q)
		return;

	qeth_drain_output_queue(q, true);
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
{
	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	unsigned int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
		goto err_qdio_bufs;

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
		if (qeth_alloc_out_buf(q, i, GFP_KERNEL))
			goto err_out_bufs;
	}

	return q;

err_out_bufs:
	while (i > 0)
		qeth_free_out_buf(q->bufs[--i]);
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
err_qdio_bufs:
	kfree(q);
	return NULL;
}

static void qeth_tx_completion_timer(struct timer_list *timer)
{
	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);

	napi_schedule(&queue->napi);
	QETH_TXQ_STAT_INC(queue, completion_timer);
}
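
/* Allocate the inbound buffer pool, all outbound queues and the completion
 * queue. The qdio.state atomic guards against repeated allocation; on any
 * failure, everything allocated so far is rolled back.
 */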
static int qeth_alloc_qdio_queues(struct qeth_card *card)
{
	unsigned int i;

	QETH_CARD_TEXT(card, 2, "allcqdbf");

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
		return 0;

	/* inbound buffer pool */
	if (qeth_alloc_buffer_pool(card))
		goto out_buffer_pool;

	/* outbound */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		struct qeth_qdio_out_q *queue;

		queue = qeth_alloc_output_queue();
		if (!queue)
			goto out_freeoutq;
		QETH_CARD_TEXT_(card, 2, "outq %i", i);
		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
		card->qdio.out_qs[i] = queue;
		queue->card = card;
		queue->queue_no = i;
		INIT_LIST_HEAD(&queue->pending_bufs);
		spin_lock_init(&queue->lock);
		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
		if (IS_IQD(card)) {
			queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
			queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
			queue->rescan_usecs = QETH_TX_TIMER_USECS;
		} else {
			queue->coalesce_usecs = USEC_PER_SEC;
			queue->max_coalesced_frames = 0;
			queue->rescan_usecs = 10 * USEC_PER_SEC;
		}
		queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
	}

	/* completion */
	if (qeth_alloc_cq(card))
		goto out_freeoutq;

	return 0;

out_freeoutq:
	while (i > 0) {
		qeth_free_output_queue(card->qdio.out_qs[--i]);
		card->qdio.out_qs[i] = NULL;
	}
	qeth_free_buffer_pool(card);
out_buffer_pool:
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	return -ENOMEM;
}

static void qeth_free_qdio_queues(struct qeth_card *card)
{
	int i, j;

	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
		QETH_QDIO_UNINITIALIZED)
		return;

	qeth_free_cq(card);
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (card->qdio.in_q->bufs[j].rx_skb) {
			consume_skb(card->qdio.in_q->bufs[j].rx_skb);
			card->qdio.in_q->bufs[j].rx_skb = NULL;
		}
	}

	/* inbound buffer pool */
	qeth_free_buffer_pool(card);
	/* free outbound qdio_qs */
	for (i = 0; i < card->qdio.no_out_queues; i++) {
		qeth_free_output_queue(card->qdio.out_qs[i]);
		card->qdio.out_qs[i] = NULL;
	}
}

static void qeth_fill_qib_parms(struct qeth_card *card,
				struct qeth_qib_parms *parms)
{
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	parms->pcit_magic[0] = 'P';
	parms->pcit_magic[1] = 'C';
	parms->pcit_magic[2] = 'I';
	parms->pcit_magic[3] = 'T';
	ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
	parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
	parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
	parms->pcit_c = QETH_PCI_TIMER_VALUE(card);

	parms->blkt_magic[0] = 'B';
	parms->blkt_magic[1] = 'L';
	parms->blkt_magic[2] = 'K';
	parms->blkt_magic[3] = 'T';
	ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
	parms->blkt_total = card->info.blkt.time_total;
	parms->blkt_inter_packet = card->info.blkt.inter_packet;
	parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;

	/* Prio-queueing implicitly uses the default priorities: */
	if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
		return;

	parms->pque_magic[0] = 'P';
	parms->pque_magic[1] = 'Q';
	parms->pque_magic[2] = 'U';
	parms->pque_magic[3] = 'E';
	ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
	parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
	parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;

	qeth_for_each_output_queue(card, queue, i)
		parms->pque_priority[i] = queue->priority;
}

static int qeth_qdio_activate(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 3, "qdioact");
	return qdio_activate(CARD_DDEV(card));
}

static int qeth_dm_act(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "dmact");

	iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, NULL, NULL);
}
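
/* Full MPC bring-up sequence: start the control-channel read loop, run the
 * CM and ULP ENABLE/SETUP exchanges, allocate + establish + activate the
 * QDIO queues, and finally issue DM ACT. Each step leaves a numbered error
 * marker in the dbf trace on failure.
 */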
static int qeth_mpc_initialize(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "mpcinit");

	rc = qeth_issue_next_read(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		return rc;
	}
	rc = qeth_cm_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
		return rc;
	}
	rc = qeth_cm_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		return rc;
	}
	rc = qeth_ulp_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		return rc;
	}
	rc = qeth_ulp_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		return rc;
	}
	rc = qeth_alloc_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		return rc;
	}
	rc = qeth_qdio_establish(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		qeth_free_qdio_queues(card);
		return rc;
	}
	rc = qeth_qdio_activate(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
		return rc;
	}
	rc = qeth_dm_act(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
		return rc;
	}

	return 0;
}

static void qeth_print_status_message(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:
		/* VM will use a non-zero first character to indicate a
		 * HiperSockets-like reporting of the level; OSA sets the
		 * first character to zero.
		 */
		if (!card->info.mcl_level[0]) {
			sprintf(card->info.mcl_level, "%02x%02x",
				card->info.mcl_level[2],
				card->info.mcl_level[3]);
			break;
		}
		fallthrough;
	case QETH_CARD_TYPE_IQD:
		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
				card->info.mcl_level[0]];
			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
				card->info.mcl_level[1]];
			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
				card->info.mcl_level[2]];
			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
				card->info.mcl_level[3]];
			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
		}
		break;
	default:
		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
	}
	dev_info(&card->gdev->dev,
		 "Device is a%s card%s%s%s\nwith link type %s.\n",
		 qeth_get_cardname(card),
		 (card->info.mcl_level[0]) ? " (level: " : "",
		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
		 (card->info.mcl_level[0]) ? ")" : "",
		 qeth_get_cardname_short(card));
}

static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;

	QETH_CARD_TEXT(card, 5, "inwrklst");

	list_for_each_entry(entry,
			    &card->qdio.init_pool.entry_list, init_list) {
		qeth_put_buffer_pool_entry(card, entry);
	}
}
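
/* Find a buffer pool entry whose pages are no longer referenced elsewhere
 * (page_count == 1). If no entry is fully free, take the first one and
 * replace its still-referenced pages with freshly allocated ones.
 */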
static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
					struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;
	int i, free;

	if (list_empty(&card->qdio.in_buf_pool.entry_list))
		return NULL;

	list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
		free = 1;
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
			if (page_count(entry->elements[i]) > 1) {
				free = 0;
				break;
			}
		}
		if (free) {
			list_del_init(&entry->list);
			return entry;
		}
	}

	/* no free buffer in pool so take first one and swap pages */
	entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
				 struct qeth_buffer_pool_entry, list);
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		if (page_count(entry->elements[i]) > 1) {
			struct page *page = dev_alloc_page();

			if (!page)
				return NULL;

			__free_page(entry->elements[i]);
			entry->elements[i] = page;
			QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
		}
	}

	list_del_init(&entry->list);
	return entry;
}
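
/* Arm one inbound QDIO buffer: attach a pool entry and point each buffer
 * element at one of the entry's pages. With a completion queue enabled, an
 * skb is pre-allocated for this buffer up front as well.
 */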
static int qeth_init_input_buffer(struct qeth_card *card,
		struct qeth_qdio_buffer *buf)
{
	struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
	int i;

	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
		buf->rx_skb = netdev_alloc_skb(card->dev,
					       ETH_HLEN +
					       sizeof(struct ipv6hdr));
		if (!buf->rx_skb)
			return -ENOMEM;
	}

	if (!pool_entry) {
		pool_entry = qeth_find_free_buffer_pool_entry(card);
		if (!pool_entry)
			return -ENOBUFS;

		buf->pool_entry = pool_entry;
	}

	/*
	 * since the buffer is accessed only from the input_tasklet
	 * there shouldn't be a need to synchronize; also, since we use
	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
	 * buffers
	 */
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		buf->buffer->element[i].length = PAGE_SIZE;
		buf->buffer->element[i].addr =
			page_to_phys(pool_entry->elements[i]);
		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
		else
			buf->buffer->element[i].eflags = 0;
		buf->buffer->element[i].sflags = 0;
	}
	return 0;
}
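
/* Pick how many TX buffers may be submitted with a single doorbell. Only
 * plain unicast IQD queues without CQ use the limit that the adapter
 * reports in its SSQD block (ssqd.mmwc); everything else stays at 1.
 */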
static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
					    struct qeth_qdio_out_q *queue)
{
	if (!IS_IQD(card) ||
	    qeth_iqd_is_mcast_queue(card, queue) ||
	    card->options.cq == QETH_CQ_ENABLED ||
	    qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
		return 1;

	return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
}

static int qeth_init_qdio_queues(struct qeth_card *card)
{
	unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
	unsigned int i;
	int rc;

	QETH_CARD_TEXT(card, 2, "initqdqs");

	/* inbound queue */
	qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	memset(&card->rx, 0, sizeof(struct qeth_rx));

	qeth_initialize_working_pool_list(card);
	/*give only as many buffers to hardware as we have buffer pool entries*/
	for (i = 0; i < rx_bufs; i++) {
		rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
		if (rc)
			return rc;
	}

	card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs,
		     NULL);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		return rc;
	}

	/* completion */
	rc = qeth_cq_init(card);
	if (rc)
		return rc;

	/* outbound queue */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];

		qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
		queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
		queue->next_buf_to_fill = 0;
		queue->do_pack = 0;
		queue->prev_hdr = NULL;
		queue->coalesced_frames = 0;
		queue->bulk_start = 0;
		queue->bulk_count = 0;
		queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
		atomic_set(&queue->used_buffers, 0);
		atomic_set(&queue->set_pci_flags_count, 0);
		netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
	}
	return 0;
}

static void qeth_ipa_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_mpc_finalize_cmd(card, iob);

	/* override with IPA-specific values: */
	__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
}

static void qeth_prepare_ipa_cmd(struct qeth_card *card,
				 struct qeth_cmd_buffer *iob, u16 cmd_length)
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
	u16 total_length = iob->length;

	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
		       iob->data);
	iob->finalize = qeth_ipa_finalize_cmd;

	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
}

static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);

	return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
}
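
/* Build an IPA command buffer: wrap the command into the IPA PDU header,
 * pre-fill the command header, and attach the seqno-based reply matcher.
 */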
struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
					   enum qeth_ipa_cmds cmd_code,
					   enum qeth_prot_versions prot,
					   unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipacmd_hdr *hdr;

	data_length += offsetof(struct qeth_ipa_cmd, data);
	iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
			     QETH_IPA_TIMEOUT);
	if (!iob)
		return NULL;

	qeth_prepare_ipa_cmd(card, iob, data_length);
	iob->match = qeth_ipa_match_reply;

	hdr = &__ipa_cmd(iob)->hdr;
	hdr->command = cmd_code;
	hdr->initiator = IPA_CMD_INITIATOR_HOST;
	/* hdr->seqno is set by qeth_send_control_data() */
	hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
	hdr->rel_adapter_no = (u8) card->dev->dev_port;
	hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
	hdr->param_count = 1;
	hdr->prot_version = prot;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);

static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	return (cmd->hdr.return_code) ? -EIO : 0;
}

/**
 * qeth_send_ipa_cmd() - send an IPA command
 *
 * See qeth_send_control_data() for explanation of the arguments.
 */
int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
			unsigned long),
		void *reply_param)
{
	int rc;

	QETH_CARD_TEXT(card, 4, "sendipa");

	if (card->read_or_write_problem) {
		qeth_put_cmd(iob);
		return -EIO;
	}

	if (reply_cb == NULL)
		reply_cb = qeth_send_ipa_cmd_cb;
	rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
	if (rc == -ETIME) {
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);

static int qeth_send_startlan_cb(struct qeth_card *card,
				 struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
		return -ENETDOWN;

	return (cmd->hdr.return_code) ? -EIO : 0;
}

static int qeth_send_startlan(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "strtlan");

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
}

static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
	return cmd->hdr.return_code;
}

static int qeth_query_setadapterparms_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_query_cmds_supp *query_cmd;

	QETH_CARD_TEXT(card, 3, "quyadpcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
	if (query_cmd->lan_type & 0x7f) {
		if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
			return -EPROTONOSUPPORT;

		card->info.link_type = query_cmd->lan_type;
		QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
	}

	card->options.adp.supported = query_cmd->supported_cmds;
	return 0;
}

static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
						    enum qeth_ipa_setadp_cmd adp_cmd,
						    unsigned int data_length)
{
	struct qeth_ipacmd_setadpparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
				 data_length +
				 offsetof(struct qeth_ipacmd_setadpparms,
					  data));
	if (!iob)
		return NULL;

	hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
	hdr->cmdlength = sizeof(*hdr) + data_length;
	hdr->command_code = adp_cmd;
	hdr->used_total = 1;
	hdr->seq_no = 1;
	return iob;
}

static int qeth_query_setadapterparms(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 3, "queryadp");
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
				   SETADP_DATA_SIZEOF(query_cmds_supp));
	if (!iob)
		return -ENOMEM;
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
	return rc;
}

static int qeth_query_ipassists_cb(struct qeth_card *card,
				   struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "qipasscb");

	cmd = (struct qeth_ipa_cmd *) data;

	switch (cmd->hdr.return_code) {
	case IPA_RC_SUCCESS:
		break;
	case IPA_RC_NOTSUPP:
	case IPA_RC_L2_UNSUPPORTED_CMD:
		QETH_CARD_TEXT(card, 2, "ipaunsup");
		card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
		card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
		return -EOPNOTSUPP;
	default:
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
				 CARD_DEVID(card), cmd->hdr.return_code);
		return -EIO;
	}

	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4 = cmd->hdr.assists;
	else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6 = cmd->hdr.assists;
	else
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
				 CARD_DEVID(card));
	return 0;
}

static int qeth_query_ipassists(struct qeth_card *card,
				enum qeth_prot_versions prot)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
	if (!iob)
		return -ENOMEM;
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
	return rc;
}

static int qeth_query_switch_attributes_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_query_switch_attributes *attrs;
	struct qeth_switch_info *sw_info;

	QETH_CARD_TEXT(card, 2, "qswiatcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	sw_info = (struct qeth_switch_info *)reply->param;
	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
	sw_info->capabilities = attrs->capabilities;
	sw_info->settings = attrs->settings;
	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
			sw_info->settings);
	return 0;
}

int qeth_query_switch_attributes(struct qeth_card *card,
				 struct qeth_switch_info *sw_info)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qswiattr");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
		return -EOPNOTSUPP;
	if (!netif_carrier_ok(card->dev))
		return -ENOMEDIUM;
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob,
				 qeth_query_switch_attributes_cb, sw_info);
}

struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
					  enum qeth_diags_cmds sub_cmd,
					  unsigned int data_length)
{
	struct qeth_ipacmd_diagass *cmd;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
				 DIAG_HDR_LEN + data_length);
	if (!iob)
		return NULL;

	cmd = &__ipa_cmd(iob)->data.diagass;
	cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
	cmd->subcmd = sub_cmd;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);

static int qeth_query_setdiagass_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;

	if (rc) {
		QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
		return -EIO;
	}

	card->info.diagass_support = cmd->data.diagass.ext;
	return 0;
}

static int qeth_query_setdiagass(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qdiagass");
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
}

static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
{
	unsigned long info = get_zeroed_page(GFP_KERNEL);
	struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
	struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
	struct ccw_dev_id ccwid;
	int level;

	tid->chpid = card->info.chpid;
	ccw_device_get_id(CARD_RDEV(card), &ccwid);
	tid->ssid = ccwid.ssid;
	tid->devno = ccwid.devno;
	if (!info)
		return;
	level = stsi(NULL, 0, 0, 0);
	if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
		tid->lparnr = info222->lpar_number;
	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
	}
	free_page(info);
}

static int qeth_hw_trap_cb(struct qeth_card *card,
			   struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;

	if (rc) {
		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
		return -EIO;
	}
	return 0;
}

int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "diagtrap");
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	cmd->data.diagass.type = 1;
	cmd->data.diagass.action = action;
	switch (action) {
	case QETH_DIAGS_TRAP_ARM:
		cmd->data.diagass.options = 0x0003;
		cmd->data.diagass.ext = 0x00010000 +
			sizeof(struct qeth_trap_id);
		qeth_get_trap_id(card,
				 (struct qeth_trap_id *)cmd->data.diagass.cdata);
		break;
	case QETH_DIAGS_TRAP_DISARM:
		cmd->data.diagass.options = 0x0001;
		break;
	case QETH_DIAGS_TRAP_CAPTURE:
		break;
	}
	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_hw_trap);

static int qeth_check_qdio_errors(struct qeth_card *card,
				  struct qdio_buffer *buf,
				  unsigned int qdio_error,
				  const char *dbftext)
{
	if (qdio_error) {
		QETH_CARD_TEXT(card, 2, dbftext);
		QETH_CARD_TEXT_(card, 2, " F15=%02X",
				buf->element[15].sflags);
		QETH_CARD_TEXT_(card, 2, " F14=%02X",
				buf->element[14].sflags);
		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
		if ((buf->element[15].sflags) == 0x12) {
			QETH_CARD_STAT_INC(card, rx_fifo_errors);
			return 0;
		} else
			return 1;
	}
	return 0;
}
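
/* Re-arm processed RX buffers. Refilling is batched up to a threshold to
 * save SIGA instructions; on allocation failure the driver falls back to
 * plain skb allocation and may schedule the buffer-reclaim worker.
 */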
static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
					 unsigned int count)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
	struct list_head *lh;
	int i;
	int rc;
	int newcount = 0;

	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
				&queue->bufs[QDIO_BUFNR(i)])) {
				break;
			} else {
				newcount++;
			}
		}

		if (newcount < count) {
			/* we are in memory shortage so we switch back to
			   traditional skb allocation and drop packages */
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}

		if (!count) {
			i = 0;
			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
				i++;
			if (i == card->qdio.in_buf_pool.buf_count) {
				QETH_CARD_TEXT(card, 2, "qsarbw");
				schedule_delayed_work(
					&card->buffer_reclaim_work,
					QETH_RECLAIM_WORK_TIME);
			}
			return 0;
		}

		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
			     queue->next_buf_to_init, count, NULL);
		if (rc) {
			QETH_CARD_TEXT(card, 2, "qinberr");
		}
		queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
						     count);
		return count;
	}

	return 0;
}

static void qeth_buffer_reclaim_work(struct work_struct *work)
{
	struct qeth_card *card = container_of(to_delayed_work(work),
					      struct qeth_card,
					      buffer_reclaim_work);

	local_bh_disable();
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();
}

static void qeth_handle_send_error(struct qeth_card *card,
		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
	int sbalf15 = buffer->buffer->element[15].sflags;

	QETH_CARD_TEXT(card, 6, "hdsnderr");
	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");

	if (!qdio_err)
		return;

	if ((sbalf15 >= 15) && (sbalf15 <= 31))
		return;

	QETH_CARD_TEXT(card, 1, "lnkfail");
	QETH_CARD_TEXT_(card, 1, "%04x %02x",
			(u16)qdio_err, (u8)sbalf15);
}

/**
 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
 * @queue: queue to check for packing buffer
 *
 * Returns number of buffers that were prepared for flush.
 */
static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;

	buffer = queue->bufs[queue->next_buf_to_fill];
	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
	    (buffer->next_element_to_fill > 0)) {
		/* it's a packing buffer */
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
			QDIO_BUFNR(queue->next_buf_to_fill + 1);
		return 1;
	}
	return 0;
}

/*
 * Switch to packing state if the number of used buffers on a queue
 * reaches a certain limit.
 */
static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
{
	if (!queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    >= QETH_HIGH_WATERMARK_PACK){
			/* switch non-PACKING -> PACKING */
			QETH_CARD_TEXT(queue->card, 6, "np->pack");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 1;
		}
	}
}

/*
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue this buffer will be prepared to be flushed.
 * In that case 1 is returned to inform the caller. If no buffer
 * has to be flushed, zero is returned.
 */
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
{
	if (queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    <= QETH_LOW_WATERMARK_PACK) {
			/* switch PACKING -> non-PACKING */
			QETH_CARD_TEXT(queue->card, 6, "pack->np");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 0;
			return qeth_prep_flush_pack_buffer(queue);
		}
	}
	return 0;
}
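
/* Hand @count buffers starting at @index to the device. For IQD, a QAOB is
 * attached when an async completion may be signalled; for OSA, the PCI
 * request flag is managed here. Persistent SIGA errors trigger recovery.
 */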
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
			       int count)
{
	struct qeth_qdio_out_buffer *buf = queue->bufs[index];
	struct qeth_card *card = queue->card;
	unsigned int frames, usecs;
	struct qaob *aob = NULL;
	int rc;
	int i;

	for (i = index; i < index + count; ++i) {
		unsigned int bidx = QDIO_BUFNR(i);
		struct sk_buff *skb;

		buf = queue->bufs[bidx];
		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
				SBAL_EFLAGS_LAST_ENTRY;
		queue->coalesced_frames += buf->frames;

		if (IS_IQD(card)) {
			skb_queue_walk(&buf->skb_list, skb)
				skb_tx_timestamp(skb);
		}
	}

	if (IS_IQD(card)) {
		if (card->options.cq == QETH_CQ_ENABLED &&
		    !qeth_iqd_is_mcast_queue(card, queue) &&
		    count == 1) {
			if (!buf->aob)
				buf->aob = kmem_cache_zalloc(qeth_qaob_cache,
							     GFP_ATOMIC);
			if (buf->aob) {
				struct qeth_qaob_priv1 *priv;

				aob = buf->aob;
				priv = (struct qeth_qaob_priv1 *)&aob->user1;
				priv->state = QETH_QAOB_ISSUED;
				priv->queue_no = queue->queue_no;
			}
		}
	} else {
		if (!queue->do_pack) {
			if ((atomic_read(&queue->used_buffers) >=
				(QETH_HIGH_WATERMARK_PACK -
				 QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)) {
				/* it's likely that we'll go to packing
				 * mode soon */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * there's no outstanding PCI any more, so we
				 * have to request a PCI to be sure that the PCI
				 * will wake at some time in the future then we
				 * can flush packed buffers that might still be
				 * hanging around, which can happen if no
				 * further send was requested by the stack
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		}
	}

	QETH_TXQ_STAT_INC(queue, doorbell);
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_OUTPUT, queue->queue_no,
		     index, count, aob);

	switch (rc) {
	case 0:
	case -ENOBUFS:
		/* ignore temporary SIGA errors without busy condition */

		/* Fake the TX completion interrupt: */
		frames = READ_ONCE(queue->max_coalesced_frames);
		usecs = READ_ONCE(queue->coalesce_usecs);

		if (frames && queue->coalesced_frames >= frames) {
			napi_schedule(&queue->napi);
			queue->coalesced_frames = 0;
			QETH_TXQ_STAT_INC(queue, coal_frames);
		} else if (qeth_use_tx_irqs(card) &&
			   atomic_read(&queue->used_buffers) >= 32) {
			/* Old behaviour carried over from the qdio layer: */
			napi_schedule(&queue->napi);
			QETH_TXQ_STAT_INC(queue, coal_frames);
		} else if (usecs) {
			qeth_tx_arm_timer(queue, usecs);
		}
		break;
	default:
		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);

		/* this must not happen under normal circumstances. if it
		 * happens something is really wrong -> recover */
		qeth_schedule_recovery(queue->card);
	}
}

static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
{
	qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);

	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
	queue->prev_hdr = NULL;
	queue->bulk_count = 0;
}

static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	/*
	 * check if we have to switch to non-packing mode or if
	 * we have to get a pci flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
		unsigned int index, flush_cnt;
		bool q_was_packing;

		spin_lock(&queue->lock);

		index = queue->next_buf_to_fill;
		q_was_packing = queue->do_pack;

		flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
		if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
			flush_cnt = qeth_prep_flush_pack_buffer(queue);

		if (flush_cnt) {
			qeth_flush_buffers(queue, index, flush_cnt);
			if (q_was_packing)
				QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
		}

		spin_unlock(&queue->lock);
	}
}

static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	napi_schedule_irqoff(&card->napi);
}

int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
	int rc = 0;

	if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
		rc = -1;
		goto out;
	} else {
		if (card->options.cq == cq) {
			rc = 0;
			goto out;
		}

		qeth_free_qdio_queues(card);
		card->options.cq = cq;
		rc = 0;
	}
out:
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_configure_cq);
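
/* A QAOB completion arrived on the CQ: mark it DONE and, if the TX buffer
 * was still waiting for it (PENDING), kick the owning queue's NAPI
 * instance to finalize the buffer.
 */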
static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob)
{
	struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1;
	unsigned int queue_no = priv->queue_no;

	BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1));

	if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING &&
	    queue_no < card->qdio.no_out_queues)
		napi_schedule(&card->qdio.out_qs[queue_no]->napi);
}

static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
				 unsigned int queue, int first_element,
				 int count)
{
	struct qeth_qdio_q *cq = card->qdio.c_q;
	int i;
	int rc;

	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);

	if (qdio_err) {
		netif_tx_stop_all_queues(card->dev);
		qeth_schedule_recovery(card);
		return;
	}

	for (i = first_element; i < first_element + count; ++i) {
		struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
		int e = 0;

		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
		       buffer->element[e].addr) {
			unsigned long phys_aob_addr = buffer->element[e].addr;

			qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr));
			++e;
		}
		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
	}
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
		     cq->next_buf_to_init, count, NULL);
	if (rc) {
		dev_warn(&card->gdev->dev,
			 "QDIO reported an error, rc=%i\n", rc);
		QETH_CARD_TEXT(card, 2, "qcqherr");
	}

	cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
}

static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
				    unsigned int qdio_err, int queue,
				    int first_elem, int count,
				    unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);

	if (qdio_err)
		qeth_schedule_recovery(card);
}

static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
				     unsigned int qdio_error, int __queue,
				     int first_element, int count,
				     unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *) card_ptr;

	QETH_CARD_TEXT(card, 2, "achkcond");
	netif_tx_stop_all_queues(card->dev);
	qeth_schedule_recovery(card);
}

/*
 * Note: Function assumes that we have 4 outbound queues.
 */
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
	u8 tos;

	switch (card->qdio.do_prio_queueing) {
	case QETH_PRIO_Q_ING_TOS:
	case QETH_PRIO_Q_ING_PREC:
		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			tos = ipv4_get_dsfield(ip_hdr(skb));
			break;
		case htons(ETH_P_IPV6):
			tos = ipv6_get_dsfield(ipv6_hdr(skb));
			break;
		default:
			return card->qdio.default_out_queue;
		}
		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
			return ~tos >> 6 & 3;
		if (tos & IPTOS_MINCOST)
			return 3;
		if (tos & IPTOS_RELIABILITY)
			return 2;
		if (tos & IPTOS_THROUGHPUT)
			return 1;
		if (tos & IPTOS_LOWDELAY)
			return 0;
		break;
	case QETH_PRIO_Q_ING_SKB:
		if (skb->priority > 5)
			return 0;
		return ~skb->priority >> 1 & 3;
	case QETH_PRIO_Q_ING_VLAN:
		if (veth->h_vlan_proto == htons(ETH_P_8021Q))
			return ~ntohs(veth->h_vlan_TCI) >>
			       (VLAN_PRIO_SHIFT + 1) & 3;
		break;
	case QETH_PRIO_Q_ING_FIXED:
		return card->qdio.default_out_queue;
	default:
		break;
	}
	return card->qdio.default_out_queue;
}
EXPORT_SYMBOL_GPL(qeth_get_priority_queue);

/**
 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
 * @skb: SKB address
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * fragmented part of the SKB. Returns zero for linear SKB.
 */
static int qeth_get_elements_for_frags(struct sk_buff *skb)
{
	int cnt, elements = 0;

	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		elements += qeth_get_elements_for_range(
			(addr_t)skb_frag_address(frag),
			(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
	}
	return elements;
}

/**
 * qeth_count_elements() - Counts the number of QDIO buffer elements needed
 *			   to transmit an skb.
 * @skb: the skb to operate on.
 * @data_offset: skip this part of the skb's linear data
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
 * skb's data (both its linear part and paged fragments).
 */
static unsigned int qeth_count_elements(struct sk_buff *skb,
					unsigned int data_offset)
{
	unsigned int elements = qeth_get_elements_for_frags(skb);
	addr_t end = (addr_t)skb->data + skb_headlen(skb);
	addr_t start = (addr_t)skb->data + data_offset;

	if (start != end)
		elements += qeth_get_elements_for_range(start, end);
	return elements;
}

#define QETH_HDR_CACHE_OBJ_SIZE	(sizeof(struct qeth_hdr_tso) + \
				 MAX_TCP_HEADER)

/**
 * qeth_add_hw_header() - add a HW header to an skb.
 * @queue: TX queue that the skb will be placed on.
 * @skb: skb that the HW header should be added to.
 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
 *	 it contains a valid pointer to a qeth_hdr.
 * @hdr_len: length of the HW header.
 * @proto_len: length of protocol headers that need to be in same page as the
 *	       HW header.
 * @elements: returns the required number of buffer elements for this skb.
 *
 * Returns the pushed length. If the header can't be pushed on
 * (eg. because it would cross a page boundary), it is allocated from
 * the cache instead and 0 is returned.
 * The number of needed buffer elements is returned in @elements.
 * Error to create the hdr is indicated by returning with < 0.
 */
static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
			      struct sk_buff *skb, struct qeth_hdr **hdr,
			      unsigned int hdr_len, unsigned int proto_len,
			      unsigned int *elements)
{
	gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
	const unsigned int contiguous = proto_len ? proto_len : 1;
	const unsigned int max_elements = queue->max_elements;
	unsigned int __elements;
	addr_t start, end;
	bool push_ok;
	int rc;

check_layout:
	start = (addr_t)skb->data - hdr_len;
	end = (addr_t)skb->data;

	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
		/* Push HW header into same page as first protocol header. */
		push_ok = true;
		/* ... but TSO always needs a separate element for headers: */
		if (skb_is_gso(skb))
			__elements = 1 + qeth_count_elements(skb, proto_len);
		else
			__elements = qeth_count_elements(skb, 0);
	} else if (!proto_len && PAGE_ALIGNED(skb->data)) {
		/* Push HW header into preceding page, flush with skb->data. */
		push_ok = true;
		__elements = 1 + qeth_count_elements(skb, 0);
	} else {
		/* Use header cache, copy protocol headers up. */
		push_ok = false;
		__elements = 1 + qeth_count_elements(skb, proto_len);
	}

	/* Compress skb to fit into one IO buffer: */
	if (__elements > max_elements) {
		if (!skb_is_nonlinear(skb)) {
			/* Drop it, no easy way of shrinking it further. */
			QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
					 max_elements, __elements, skb->len);
			return -E2BIG;
		}

		rc = skb_linearize(skb);
		if (rc) {
			QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
			return rc;
		}

		QETH_TXQ_STAT_INC(queue, skbs_linearized);
		/* Linearization changed the layout, re-evaluate: */
		goto check_layout;
	}

	*elements = __elements;
	/* Add the header: */
	if (push_ok) {
		*hdr = skb_push(skb, hdr_len);
		return hdr_len;
	}

	/* Fall back to cache element with known-good alignment: */
	if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
		return -E2BIG;
	*hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
	if (!*hdr)
		return -ENOMEM;
	/* Copy protocol headers behind HW header: */
	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
	return 0;
}
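
/* Decide whether @curr_skb may share the current bulk buffer: only frames
 * for the same destination MAC resp. next hop, and with the same VLAN
 * handling, may be aggregated into one buffer.
 */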
static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
			      struct sk_buff *curr_skb,
			      struct qeth_hdr *curr_hdr)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
	struct qeth_hdr *prev_hdr = queue->prev_hdr;

	if (!prev_hdr)
		return true;

	/* All packets must have the same target: */
	if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);

		return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
					eth_hdr(curr_skb)->h_dest) &&
		       qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
	}

	return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
	       qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
}

/**
 * qeth_fill_buffer() - map skb into an output buffer
 * @buf:	buffer to transport the skb
 * @skb:	skb to map into the buffer
 * @hdr:	qeth_hdr for this skb. Either at skb->data, or allocated
 *		from qeth_core_header_cache.
 * @offset:	when mapping the skb, start at skb->data + offset
 * @hd_len:	if > 0, build a dedicated header element of this size
 */
static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
				     struct sk_buff *skb, struct qeth_hdr *hdr,
				     unsigned int offset, unsigned int hd_len)
{
	struct qdio_buffer *buffer = buf->buffer;
	int element = buf->next_element_to_fill;
	int length = skb_headlen(skb) - offset;
	char *data = skb->data + offset;
	unsigned int elem_length, cnt;
	bool is_first_elem = true;

	__skb_queue_tail(&buf->skb_list, skb);

	/* build dedicated element for HW Header */
	if (hd_len) {
		is_first_elem = false;

		buffer->element[element].addr = virt_to_phys(hdr);
		buffer->element[element].length = hd_len;
		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;

		/* HW header is allocated from cache: */
		if ((void *)hdr != skb->data)
			__set_bit(element, buf->from_kmem_cache);
		/* HW header was pushed and is contiguous with linear part: */
		else if (length > 0 && !PAGE_ALIGNED(data) &&
			 (data == (char *)hdr + hd_len))
			buffer->element[element].eflags |=
				SBAL_EFLAGS_CONTIGUOUS;

		element++;
	}

	/* map linear part into buffer element(s) */
	while (length > 0) {
		elem_length = min_t(unsigned int, length,
				    PAGE_SIZE - offset_in_page(data));

		buffer->element[element].addr = virt_to_phys(data);
		buffer->element[element].length = elem_length;
		length -= elem_length;
		if (is_first_elem) {
			is_first_elem = false;
			if (length || skb_is_nonlinear(skb))
				/* skb needs additional elements */
				buffer->element[element].eflags =
					SBAL_EFLAGS_FIRST_FRAG;
			else
				buffer->element[element].eflags = 0;
		} else {
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;
		}

		data += elem_length;
		element++;
	}

	/* map page frags into buffer element(s) */
	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		data = skb_frag_address(frag);
		length = skb_frag_size(frag);
		while (length > 0) {
			elem_length = min_t(unsigned int, length,
					    PAGE_SIZE - offset_in_page(data));

			buffer->element[element].addr = virt_to_phys(data);
			buffer->element[element].length = elem_length;
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;

			length -= elem_length;
			data += elem_length;
			element++;
		}
	}

	if (buffer->element[element - 1].eflags)
		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
	buf->next_element_to_fill = element;
	return element;
}
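
/* IQD TX fast path: append the skb to the current bulk buffer, or prime
 * and flush when the buffer is full, the bulk limit is reached or bulking
 * is not permitted for this frame. Stops the txq when all buffers are in
 * use.
 */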
4071 static int __qeth_xmit(struct qeth_card
*card
, struct qeth_qdio_out_q
*queue
,
4072 struct sk_buff
*skb
, unsigned int elements
,
4073 struct qeth_hdr
*hdr
, unsigned int offset
,
4074 unsigned int hd_len
)
4076 unsigned int bytes
= qdisc_pkt_len(skb
);
4077 struct qeth_qdio_out_buffer
*buffer
;
4078 unsigned int next_element
;
4079 struct netdev_queue
*txq
;
4080 bool stopped
= false;
4083 buffer
= queue
->bufs
[QDIO_BUFNR(queue
->bulk_start
+ queue
->bulk_count
)];
4084 txq
= netdev_get_tx_queue(card
->dev
, skb_get_queue_mapping(skb
));
4086 /* Just a sanity check, the wake/stop logic should ensure that we always
4087 * get a free buffer.
4089 if (atomic_read(&buffer
->state
) != QETH_QDIO_BUF_EMPTY
)
4092 flush
= !qeth_iqd_may_bulk(queue
, skb
, hdr
);
4095 (buffer
->next_element_to_fill
+ elements
> queue
->max_elements
)) {
4096 if (buffer
->next_element_to_fill
> 0) {
4097 atomic_set(&buffer
->state
, QETH_QDIO_BUF_PRIMED
);
4098 queue
->bulk_count
++;
4101 if (queue
->bulk_count
>= queue
->bulk_max
)
4105 qeth_flush_queue(queue
);
4107 buffer
= queue
->bufs
[QDIO_BUFNR(queue
->bulk_start
+
4108 queue
->bulk_count
)];
4110 /* Sanity-check again: */
4111 if (atomic_read(&buffer
->state
) != QETH_QDIO_BUF_EMPTY
)
4115 if (buffer
->next_element_to_fill
== 0 &&
4116 atomic_inc_return(&queue
->used_buffers
) >= QDIO_MAX_BUFFERS_PER_Q
) {
4117 /* If a TX completion happens right _here_ and misses to wake
4118 * the txq, then our re-check below will catch the race.
4120 QETH_TXQ_STAT_INC(queue
, stopped
);
4121 netif_tx_stop_queue(txq
);
4125 next_element
= qeth_fill_buffer(buffer
, skb
, hdr
, offset
, hd_len
);
4126 buffer
->bytes
+= bytes
;
4127 buffer
->frames
+= skb_is_gso(skb
) ? skb_shinfo(skb
)->gso_segs
: 1;
4128 queue
->prev_hdr
= hdr
;
4130 flush
= __netdev_tx_sent_queue(txq
, bytes
,
4131 !stopped
&& netdev_xmit_more());
4133 if (flush
|| next_element
>= queue
->max_elements
) {
4134 atomic_set(&buffer
->state
, QETH_QDIO_BUF_PRIMED
);
4135 queue
->bulk_count
++;
4137 if (queue
->bulk_count
>= queue
->bulk_max
)
4141 qeth_flush_queue(queue
);
4144 if (stopped
&& !qeth_out_queue_is_full(queue
))
4145 netif_tx_start_queue(txq
);
4149 static int qeth_do_send_packet(struct qeth_card
*card
,
4150 struct qeth_qdio_out_q
*queue
,
4151 struct sk_buff
*skb
, struct qeth_hdr
*hdr
,
4152 unsigned int offset
, unsigned int hd_len
,
4153 unsigned int elements_needed
)
4155 unsigned int start_index
= queue
->next_buf_to_fill
;
4156 struct qeth_qdio_out_buffer
*buffer
;
4157 unsigned int next_element
;
4158 struct netdev_queue
*txq
;
4159 bool stopped
= false;
4160 int flush_count
= 0;
4164 buffer
= queue
->bufs
[queue
->next_buf_to_fill
];
4166 /* Just a sanity check, the wake/stop logic should ensure that we always
4167 * get a free buffer.
4169 if (atomic_read(&buffer
->state
) != QETH_QDIO_BUF_EMPTY
)
4172 txq
= netdev_get_tx_queue(card
->dev
, skb_get_queue_mapping(skb
));
4174 /* check if we need to switch packing state of this queue */
4175 qeth_switch_to_packing_if_needed(queue
);
4176 if (queue
->do_pack
) {
4178 /* does packet fit in current buffer? */
4179 if (buffer
->next_element_to_fill
+ elements_needed
>
4180 queue
->max_elements
) {
4181 /* ... no -> set state PRIMED */
4182 atomic_set(&buffer
->state
, QETH_QDIO_BUF_PRIMED
);
4184 queue
->next_buf_to_fill
=
4185 QDIO_BUFNR(queue
->next_buf_to_fill
+ 1);
4186 buffer
= queue
->bufs
[queue
->next_buf_to_fill
];
4188 /* We stepped forward, so sanity-check again: */
4189 if (atomic_read(&buffer
->state
) !=
4190 QETH_QDIO_BUF_EMPTY
) {
4191 qeth_flush_buffers(queue
, start_index
,
4199 if (buffer
->next_element_to_fill
== 0 &&
4200 atomic_inc_return(&queue
->used_buffers
) >= QDIO_MAX_BUFFERS_PER_Q
) {
4201 /* If a TX completion happens right _here_ and misses to wake
4202 * the txq, then our re-check below will catch the race.
4204 QETH_TXQ_STAT_INC(queue
, stopped
);
4205 netif_tx_stop_queue(txq
);
4209 next_element
= qeth_fill_buffer(buffer
, skb
, hdr
, offset
, hd_len
);
4210 buffer
->bytes
+= qdisc_pkt_len(skb
);
4211 buffer
->frames
+= skb_is_gso(skb
) ? skb_shinfo(skb
)->gso_segs
: 1;
4214 QETH_TXQ_STAT_INC(queue
, skbs_pack
);
4215 if (!queue
->do_pack
|| stopped
|| next_element
>= queue
->max_elements
) {
4217 atomic_set(&buffer
->state
, QETH_QDIO_BUF_PRIMED
);
4218 queue
->next_buf_to_fill
=
4219 QDIO_BUFNR(queue
->next_buf_to_fill
+ 1);
4223 qeth_flush_buffers(queue
, start_index
, flush_count
);
4227 QETH_TXQ_STAT_ADD(queue
, bufs_pack
, flush_count
);
4229 if (stopped
&& !qeth_out_queue_is_full(queue
))
4230 netif_tx_start_queue(txq
);
4234 static void qeth_fill_tso_ext(struct qeth_hdr_tso
*hdr
,
4235 unsigned int payload_len
, struct sk_buff
*skb
,
4236 unsigned int proto_len
)
4238 struct qeth_hdr_ext_tso
*ext
= &hdr
->ext
;
4240 ext
->hdr_tot_len
= sizeof(*ext
);
4241 ext
->imb_hdr_no
= 1;
4243 ext
->hdr_version
= 1;
4245 ext
->payload_len
= payload_len
;
4246 ext
->mss
= skb_shinfo(skb
)->gso_size
;
4247 ext
->dg_hdr_len
= proto_len
;
4250 int qeth_xmit(struct qeth_card
*card
, struct sk_buff
*skb
,
4251 struct qeth_qdio_out_q
*queue
, __be16 proto
,
4252 void (*fill_header
)(struct qeth_qdio_out_q
*queue
,
4253 struct qeth_hdr
*hdr
, struct sk_buff
*skb
,
4254 __be16 proto
, unsigned int data_len
))
4256 unsigned int proto_len
, hw_hdr_len
;
4257 unsigned int frame_len
= skb
->len
;
4258 bool is_tso
= skb_is_gso(skb
);
4259 unsigned int data_offset
= 0;
4260 struct qeth_hdr
*hdr
= NULL
;
4261 unsigned int hd_len
= 0;
4262 unsigned int elements
;
4266 hw_hdr_len
= sizeof(struct qeth_hdr_tso
);
4267 proto_len
= skb_transport_offset(skb
) + tcp_hdrlen(skb
);
4269 hw_hdr_len
= sizeof(struct qeth_hdr
);
4270 proto_len
= (IS_IQD(card
) && IS_LAYER2(card
)) ? ETH_HLEN
: 0;
4273 rc
= skb_cow_head(skb
, hw_hdr_len
);
4277 push_len
= qeth_add_hw_header(queue
, skb
, &hdr
, hw_hdr_len
, proto_len
,
4281 if (is_tso
|| !push_len
) {
4282 /* HW header needs its own buffer element. */
4283 hd_len
= hw_hdr_len
+ proto_len
;
4284 data_offset
= push_len
+ proto_len
;
4286 memset(hdr
, 0, hw_hdr_len
);
4287 fill_header(queue
, hdr
, skb
, proto
, frame_len
);
4289 qeth_fill_tso_ext((struct qeth_hdr_tso
*) hdr
,
4290 frame_len
- proto_len
, skb
, proto_len
);
4293 rc
= __qeth_xmit(card
, queue
, skb
, elements
, hdr
, data_offset
,
4296 /* TODO: drop skb_orphan() once TX completion is fast enough */
4298 spin_lock(&queue
->lock
);
4299 rc
= qeth_do_send_packet(card
, queue
, skb
, hdr
, data_offset
,
4301 spin_unlock(&queue
->lock
);
4304 if (rc
&& !push_len
)
4305 kmem_cache_free(qeth_core_header_cache
, hdr
);
4309 EXPORT_SYMBOL_GPL(qeth_xmit
);
4311 static int qeth_setadp_promisc_mode_cb(struct qeth_card
*card
,
4312 struct qeth_reply
*reply
, unsigned long data
)
4314 struct qeth_ipa_cmd
*cmd
= (struct qeth_ipa_cmd
*) data
;
4315 struct qeth_ipacmd_setadpparms
*setparms
;
4317 QETH_CARD_TEXT(card
, 4, "prmadpcb");
4319 setparms
= &(cmd
->data
.setadapterparms
);
4320 if (qeth_setadpparms_inspect_rc(cmd
)) {
4321 QETH_CARD_TEXT_(card
, 4, "prmrc%x", cmd
->hdr
.return_code
);
4322 setparms
->data
.mode
= SET_PROMISC_MODE_OFF
;
4324 card
->info
.promisc_mode
= setparms
->data
.mode
;
4325 return (cmd
->hdr
.return_code
) ? -EIO
: 0;
4328 void qeth_setadp_promisc_mode(struct qeth_card
*card
, bool enable
)
4330 enum qeth_ipa_promisc_modes mode
= enable
? SET_PROMISC_MODE_ON
:
4331 SET_PROMISC_MODE_OFF
;
4332 struct qeth_cmd_buffer
*iob
;
4333 struct qeth_ipa_cmd
*cmd
;
4335 QETH_CARD_TEXT(card
, 4, "setprom");
4336 QETH_CARD_TEXT_(card
, 4, "mode:%x", mode
);
4338 iob
= qeth_get_adapter_cmd(card
, IPA_SETADP_SET_PROMISC_MODE
,
4339 SETADP_DATA_SIZEOF(mode
));
4342 cmd
= __ipa_cmd(iob
);
4343 cmd
->data
.setadapterparms
.data
.mode
= mode
;
4344 qeth_send_ipa_cmd(card
, iob
, qeth_setadp_promisc_mode_cb
, NULL
);
4346 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode
);
4348 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card
*card
,
4349 struct qeth_reply
*reply
, unsigned long data
)
4351 struct qeth_ipa_cmd
*cmd
= (struct qeth_ipa_cmd
*) data
;
4352 struct qeth_ipacmd_setadpparms
*adp_cmd
;
4354 QETH_CARD_TEXT(card
, 4, "chgmaccb");
4355 if (qeth_setadpparms_inspect_rc(cmd
))
4358 adp_cmd
= &cmd
->data
.setadapterparms
;
4359 if (!is_valid_ether_addr(adp_cmd
->data
.change_addr
.addr
))
4360 return -EADDRNOTAVAIL
;
4362 if (IS_LAYER2(card
) && IS_OSD(card
) && !IS_VM_NIC(card
) &&
4363 !(adp_cmd
->hdr
.flags
& QETH_SETADP_FLAGS_VIRTUAL_MAC
))
4364 return -EADDRNOTAVAIL
;
4366 eth_hw_addr_set(card
->dev
, adp_cmd
->data
.change_addr
.addr
);
4370 int qeth_setadpparms_change_macaddr(struct qeth_card
*card
)
4373 struct qeth_cmd_buffer
*iob
;
4374 struct qeth_ipa_cmd
*cmd
;
4376 QETH_CARD_TEXT(card
, 4, "chgmac");
4378 iob
= qeth_get_adapter_cmd(card
, IPA_SETADP_ALTER_MAC_ADDRESS
,
4379 SETADP_DATA_SIZEOF(change_addr
));
4382 cmd
= __ipa_cmd(iob
);
4383 cmd
->data
.setadapterparms
.data
.change_addr
.cmd
= CHANGE_ADDR_READ_MAC
;
4384 cmd
->data
.setadapterparms
.data
.change_addr
.addr_size
= ETH_ALEN
;
4385 ether_addr_copy(cmd
->data
.setadapterparms
.data
.change_addr
.addr
,
4386 card
->dev
->dev_addr
);
4387 rc
= qeth_send_ipa_cmd(card
, iob
, qeth_setadpparms_change_macaddr_cb
,
4391 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr
);
static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_set_access_ctrl *access_ctrl_req;

	QETH_CARD_TEXT(card, 4, "setaccb");

	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
	QETH_CARD_TEXT_(card, 2, "rc=%d",
			cmd->data.setadapterparms.hdr.return_code);
	if (cmd->data.setadapterparms.hdr.return_code !=
	    SET_ACCESS_CTRL_RC_SUCCESS)
		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
				 access_ctrl_req->subcmd_code, CARD_DEVID(card),
				 cmd->data.setadapterparms.hdr.return_code);
	switch (qeth_setadpparms_inspect_rc(cmd)) {
	case SET_ACCESS_CTRL_RC_SUCCESS:
		if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
			dev_info(&card->gdev->dev,
				 "QDIO data connection isolation is deactivated\n");
		else
			dev_info(&card->gdev->dev,
				 "QDIO data connection isolation is activated\n");
		return 0;
	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
				 CARD_DEVID(card));
		return 0;
	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
				 CARD_DEVID(card));
		return 0;
	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
		dev_err(&card->gdev->dev, "Adapter does not "
			"support QDIO data connection isolation\n");
		return -EOPNOTSUPP;
	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
		dev_err(&card->gdev->dev,
			"Adapter is dedicated. "
			"QDIO data connection isolation not supported\n");
		return -EOPNOTSUPP;
	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
		dev_err(&card->gdev->dev,
			"TSO does not permit QDIO data connection isolation\n");
		return -EPERM;
	case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
		dev_err(&card->gdev->dev, "The adjacent switch port does not "
			"support reflective relay mode\n");
		return -EOPNOTSUPP;
	case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
		dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
			"enabled at the adjacent switch port\n");
		return -EREMOTEIO;
	case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
		dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
			 "at the adjacent switch failed\n");
		/* benign error while disabling ISOLATION_MODE_FWD */
		return 0;
	default:
		return -EIO;
	}
}
int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
				     enum qeth_ipa_isolation_modes mode)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_set_access_ctrl *access_ctrl_req;

	QETH_CARD_TEXT(card, 4, "setacctl");

	if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
		dev_err(&card->gdev->dev,
			"Adapter does not support QDIO data connection isolation\n");
		return -EOPNOTSUPP;
	}

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
				   SETADP_DATA_SIZEOF(set_access_ctrl));
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
	access_ctrl_req->subcmd_code = mode;

	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
			       NULL);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
		QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
				 rc, CARD_DEVID(card));
	}

	return rc;
}
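
/*
 * Note: the subcmd_code sent here is the requested
 * qeth_ipa_isolation_modes value; the callback above maps the adapter's
 * SET_ACCESS_CTRL_RC_* reply codes onto errnos and log messages, so
 * callers (e.g. the sysfs isolation attribute or the hardsetup path)
 * only see the resulting errno.
 */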
void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qeth_card *card;

	card = dev->ml_priv;
	QETH_CARD_TEXT(card, 4, "txtimeo");
	qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);
static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	switch (regnum) {
	case MII_BMCR: /* Basic mode control register */
		rc = BMCR_FULLDPLX;
		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
			rc |= BMCR_SPEED100;
		break;
	case MII_BMSR: /* Basic mode status register */
		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
		     BMSR_100BASE4;
		break;
	case MII_PHYSID1: /* PHYS ID 1 */
		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
		     dev->dev_addr[2];
		rc = (rc >> 5) & 0xFFFF;
		break;
	case MII_PHYSID2: /* PHYS ID 2 */
		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
		break;
	case MII_ADVERTISE: /* Advertisement control reg */
		rc = ADVERTISE_ALL;
		break;
	case MII_LPA: /* Link partner ability reg */
		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
		     LPA_100BASE4 | LPA_LPACK;
		break;
	case MII_EXPANSION: /* Expansion register */
		break;
	case MII_DCOUNTER: /* disconnect counter */
		break;
	case MII_FCSCOUNTER: /* false carrier counter */
		break;
	case MII_NWAYTEST: /* N-way auto-neg test register */
		break;
	case MII_RERRCOUNTER: /* rx error counter */
		rc = card->stats.rx_length_errors +
		     card->stats.rx_frame_errors +
		     card->stats.rx_fifo_errors;
		break;
	case MII_SREVISION: /* silicon revision */
		break;
	case MII_RESV1: /* reserved 1 */
		break;
	case MII_LBRERROR: /* loopback, rx, bypass error */
		break;
	case MII_PHYADDR: /* physical address */
		break;
	case MII_RESV2: /* reserved 2 */
		break;
	case MII_TPISTATUS: /* TPI status for 10mbps */
		break;
	case MII_NCONFIG: /* network interface config */
		break;
	default:
		break;
	}
	return rc;
}
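
/*
 * Note: there is no real MII PHY behind a qeth device.  The register
 * values above are synthesized from the card's link_type and MAC
 * address so that generic SIOCGMIIREG users see a plausible, always-up
 * full-duplex PHY.
 */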
static int qeth_snmp_command_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_arp_query_info *qinfo = reply->param;
	struct qeth_ipacmd_setadpparms *adp_cmd;
	unsigned int data_len;
	void *snmp_data;

	QETH_CARD_TEXT(card, 3, "snpcmdcb");

	if (cmd->hdr.return_code) {
		QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
		return -EIO;
	}
	if (cmd->data.setadapterparms.hdr.return_code) {
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
		QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
		return -EIO;
	}

	adp_cmd = &cmd->data.setadapterparms;
	data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
	if (adp_cmd->hdr.seq_no == 1) {
		snmp_data = &adp_cmd->data.snmp;
	} else {
		snmp_data = &adp_cmd->data.snmp.request;
		data_len -= offsetof(struct qeth_snmp_cmd, request);
	}

	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
		return -ENOSPC;
	}
	QETH_CARD_TEXT_(card, 4, "snore%i",
			cmd->data.setadapterparms.hdr.used_total);
	QETH_CARD_TEXT_(card, 4, "sseqn%i",
			cmd->data.setadapterparms.hdr.seq_no);
	/*copy entries to user buffer*/
	memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
	qinfo->udata_offset += data_len;

	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}
static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
{
	struct qeth_snmp_ureq __user *ureq;
	struct qeth_cmd_buffer *iob;
	unsigned int req_len;
	struct qeth_arp_query_info qinfo = {0, };
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "snmpcmd");

	if (IS_VM_NIC(card))
		return -EOPNOTSUPP;

	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
	    IS_LAYER3(card))
		return -EOPNOTSUPP;

	ureq = (struct qeth_snmp_ureq __user *) udata;
	if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
	    get_user(req_len, &ureq->hdr.req_len))
		return -EFAULT;

	/* Sanitize user input, to avoid overflows in iob size calculation: */
	if (req_len > QETH_BUFSIZE)
		return -EINVAL;

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
	if (!iob)
		return -ENOMEM;

	if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
			   &ureq->cmd, req_len)) {
		qeth_put_cmd(iob);
		return -EFAULT;
	}

	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
	if (!qinfo.udata) {
		qeth_put_cmd(iob);
		return -ENOMEM;
	}
	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);

	rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
	if (rc)
		QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
				 CARD_DEVID(card), rc);
	else {
		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
			rc = -EFAULT;
	}

	kfree(qinfo.udata);
	return rc;
}
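
/*
 * Note on multi-part replies: the adapter may split a large SNMP
 * response across several SETADP replies.  qeth_snmp_command_cb() keeps
 * the command transaction open by returning 1 while hdr.seq_no is below
 * hdr.used_total, appending each fragment at qinfo.udata_offset; only
 * the first fragment carries the full qeth_snmp_cmd header.
 */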
static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
					 struct qeth_reply *reply,
					 unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_qoat_priv *priv = reply->param;
	int resdatalen;

	QETH_CARD_TEXT(card, 3, "qoatcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;

	if (resdatalen > (priv->buffer_len - priv->response_len))
		return -ENOSPC;

	memcpy(priv->buffer + priv->response_len,
	       &cmd->data.setadapterparms.hdr, resdatalen);
	priv->response_len += resdatalen;

	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}
static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
{
	int rc = 0;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_query_oat *oat_req;
	struct qeth_query_oat_data oat_data;
	struct qeth_qoat_priv priv;
	void __user *tmp;

	QETH_CARD_TEXT(card, 3, "qoatcmd");

	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
		return -EOPNOTSUPP;

	if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
		return -EFAULT;

	priv.buffer_len = oat_data.buffer_len;
	priv.response_len = 0;
	priv.buffer = vzalloc(oat_data.buffer_len);
	if (!priv.buffer)
		return -ENOMEM;

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
				   SETADP_DATA_SIZEOF(query_oat));
	if (!iob) {
		rc = -ENOMEM;
		goto out_free;
	}
	cmd = __ipa_cmd(iob);
	oat_req = &cmd->data.setadapterparms.data.query_oat;
	oat_req->subcmd_code = oat_data.command;

	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
	if (!rc) {
		tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
					 u64_to_user_ptr(oat_data.ptr);
		oat_data.response_len = priv.response_len;

		if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
		    copy_to_user(udata, &oat_data, sizeof(oat_data)))
			rc = -EFAULT;
	}

out_free:
	vfree(priv.buffer);
	return rc;
}
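
/*
 * Note: oat_data.ptr is a u64 holding a user-space buffer address.  For
 * 31-bit compat tasks it must be converted with compat_ptr() rather
 * than u64_to_user_ptr(), hence the is_compat_task() check above.
 */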
static int qeth_query_card_info_cb(struct qeth_card *card,
				   struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_link_info *link_info = reply->param;
	struct qeth_query_card_info *card_info;

	QETH_CARD_TEXT(card, 2, "qcrdincb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	card_info = &cmd->data.setadapterparms.data.card_info;
	netdev_dbg(card->dev,
		   "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
		   card_info->card_type, card_info->port_mode,
		   card_info->port_speed);

	switch (card_info->port_mode) {
	case CARD_INFO_PORTM_FULLDUPLEX:
		link_info->duplex = DUPLEX_FULL;
		break;
	case CARD_INFO_PORTM_HALFDUPLEX:
		link_info->duplex = DUPLEX_HALF;
		break;
	default:
		link_info->duplex = DUPLEX_UNKNOWN;
	}

	switch (card_info->card_type) {
	case CARD_INFO_TYPE_1G_COPPER_A:
	case CARD_INFO_TYPE_1G_COPPER_B:
		link_info->speed = SPEED_1000;
		link_info->port = PORT_TP;
		break;
	case CARD_INFO_TYPE_1G_FIBRE_A:
	case CARD_INFO_TYPE_1G_FIBRE_B:
		link_info->speed = SPEED_1000;
		link_info->port = PORT_FIBRE;
		break;
	case CARD_INFO_TYPE_10G_FIBRE_A:
	case CARD_INFO_TYPE_10G_FIBRE_B:
		link_info->speed = SPEED_10000;
		link_info->port = PORT_FIBRE;
		break;
	default:
		switch (card_info->port_speed) {
		case CARD_INFO_PORTS_10M:
			link_info->speed = SPEED_10;
			break;
		case CARD_INFO_PORTS_100M:
			link_info->speed = SPEED_100;
			break;
		case CARD_INFO_PORTS_1G:
			link_info->speed = SPEED_1000;
			break;
		case CARD_INFO_PORTS_10G:
			link_info->speed = SPEED_10000;
			break;
		case CARD_INFO_PORTS_25G:
			link_info->speed = SPEED_25000;
			break;
		default:
			link_info->speed = SPEED_UNKNOWN;
		}

		link_info->port = PORT_OTHER;
	}

	return 0;
}
int qeth_query_card_info(struct qeth_card *card,
			 struct qeth_link_info *link_info)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qcrdinfo");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
		return -EOPNOTSUPP;
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
	if (!iob)
		return -ENOMEM;

	return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info);
}
static int qeth_init_link_info_oat_cb(struct qeth_card *card,
				      struct qeth_reply *reply_priv,
				      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_link_info *link_info = reply_priv->param;
	struct qeth_query_oat_physical_if *phys_if;
	struct qeth_query_oat_reply *reply;

	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	/* Multi-part reply is unexpected, don't bother: */
	if (cmd->data.setadapterparms.hdr.used_total > 1)
		return -EINVAL;

	/* Expect the reply to start with phys_if data: */
	reply = &cmd->data.setadapterparms.data.query_oat.reply[0];
	if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF ||
	    reply->length < sizeof(*reply))
		return -EINVAL;

	phys_if = &reply->phys_if;

	switch (phys_if->speed_duplex) {
	case QETH_QOAT_PHYS_SPEED_10M_HALF:
		link_info->speed = SPEED_10;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_10M_FULL:
		link_info->speed = SPEED_10;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_100M_HALF:
		link_info->speed = SPEED_100;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_100M_FULL:
		link_info->speed = SPEED_100;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_1000M_HALF:
		link_info->speed = SPEED_1000;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_1000M_FULL:
		link_info->speed = SPEED_1000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_10G_FULL:
		link_info->speed = SPEED_10000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_25G_FULL:
		link_info->speed = SPEED_25000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_UNKNOWN:
	default:
		link_info->speed = SPEED_UNKNOWN;
		link_info->duplex = DUPLEX_UNKNOWN;
		break;
	}

	switch (phys_if->media_type) {
	case QETH_QOAT_PHYS_MEDIA_COPPER:
		link_info->port = PORT_TP;
		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
		break;
	case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT:
		link_info->port = PORT_FIBRE;
		link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
		break;
	case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG:
		link_info->port = PORT_FIBRE;
		link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG;
		break;
	default:
		link_info->port = PORT_OTHER;
		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
		break;
	}

	return 0;
}
static void qeth_init_link_info(struct qeth_card *card)
{
	card->info.link_info.duplex = DUPLEX_FULL;

	if (IS_IQD(card) || IS_VM_NIC(card)) {
		card->info.link_info.speed = SPEED_10000;
		card->info.link_info.port = PORT_FIBRE;
		card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT;
	} else {
		switch (card->info.link_type) {
		case QETH_LINK_TYPE_FAST_ETH:
		case QETH_LINK_TYPE_LANE_ETH100:
			card->info.link_info.speed = SPEED_100;
			card->info.link_info.port = PORT_TP;
			break;
		case QETH_LINK_TYPE_GBIT_ETH:
		case QETH_LINK_TYPE_LANE_ETH1000:
			card->info.link_info.speed = SPEED_1000;
			card->info.link_info.port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_10GBIT_ETH:
			card->info.link_info.speed = SPEED_10000;
			card->info.link_info.port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_25GBIT_ETH:
			card->info.link_info.speed = SPEED_25000;
			card->info.link_info.port = PORT_FIBRE;
			break;
		default:
			dev_info(&card->gdev->dev, "Unknown link type %x\n",
				 card->info.link_type);
			card->info.link_info.speed = SPEED_UNKNOWN;
			card->info.link_info.port = PORT_OTHER;
		}

		card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN;
	}

	/* Get more accurate data via QUERY OAT: */
	if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
		struct qeth_link_info link_info;
		struct qeth_cmd_buffer *iob;

		iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
					   SETADP_DATA_SIZEOF(query_oat));
		if (iob) {
			struct qeth_ipa_cmd *cmd = __ipa_cmd(iob);
			struct qeth_query_oat *oat_req;

			oat_req = &cmd->data.setadapterparms.data.query_oat;
			oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE;

			if (!qeth_send_ipa_cmd(card, iob,
					       qeth_init_link_info_oat_cb,
					       &link_info)) {
				if (link_info.speed != SPEED_UNKNOWN)
					card->info.link_info.speed = link_info.speed;
				if (link_info.duplex != DUPLEX_UNKNOWN)
					card->info.link_info.duplex = link_info.duplex;
				if (link_info.port != PORT_OTHER)
					card->info.link_info.port = link_info.port;
				if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN)
					card->info.link_info.link_mode = link_info.link_mode;
			}
		}
	}
}
/**
 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
 * @card: pointer to a qeth_card
 *
 * Returns
 *	0, if a MAC address has been set for the card's netdevice
 *	a return code, for various error conditions
 */
int qeth_vm_request_mac(struct qeth_card *card)
{
	struct diag26c_mac_resp *response;
	struct diag26c_mac_req *request;
	int rc;

	QETH_CARD_TEXT(card, 2, "vmreqmac");

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION2;
	request->op_code = DIAG26C_GET_MAC;
	request->devno = card->info.ddev_devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		QETH_CARD_TEXT(card, 2, "badresp");
		QETH_CARD_HEX(card, 2, &request->resp_buf_len,
			      sizeof(request->resp_buf_len));
	} else if (!is_valid_ether_addr(response->mac)) {
		rc = -EINVAL;
		QETH_CARD_TEXT(card, 2, "badmac");
		QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
	} else {
		eth_hw_addr_set(card->dev, response->mac);
	}

out:
	kfree(response);
	kfree(request);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
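
/*
 * Note: DIAG 0x26c is a z/VM hypervisor service.  The request/response
 * buffers are allocated with GFP_DMA since (as an assumption of this
 * note) the diagnose interface wants 31-bit addressable storage; both
 * the length and the version of the response are validated before the
 * returned MAC is accepted.
 */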
static void qeth_determine_capabilities(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct ccw_device *ddev = channel->ccwdev;
	int rc;
	int ddev_offline = 0;

	QETH_CARD_TEXT(card, 2, "detcapab");
	if (!ddev->online) {
		ddev_offline = 1;
		rc = qeth_start_channel(channel);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "3err%d", rc);
			goto out;
		}
	}

	rc = qeth_read_conf_data(card);
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out_offline;
	}

	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);

	QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
	QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
	QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
	QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
	QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
	if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
	    ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
	    ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
		dev_info(&card->gdev->dev,
			 "Completion Queueing supported\n");
	} else {
		card->options.cq = QETH_CQ_NOTAVAILABLE;
	}

out_offline:
	if (ddev_offline == 1)
		qeth_stop_channel(channel);
out:
	return;
}
static void qeth_read_ccw_conf_data(struct qeth_card *card)
{
	struct qeth_card_info *info = &card->info;
	struct ccw_device *cdev = CARD_DDEV(card);
	struct ccw_dev_id dev_id;

	QETH_CARD_TEXT(card, 2, "ccwconfd");
	ccw_device_get_id(cdev, &dev_id);

	info->ddev_devno = dev_id.devno;
	info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
			  !ccw_device_get_iid(cdev, &info->iid) &&
			  !ccw_device_get_chid(cdev, 0, &info->chid);
	info->ssid = dev_id.ssid;

	dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
		 info->chid, info->chpid);

	QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
	QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
	QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
	QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
	QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
	QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
	QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
}
static int qeth_qdio_establish(struct qeth_card *card)
{
	struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
	struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
	struct qeth_qib_parms *qib_parms = NULL;
	struct qdio_initialize init_data;
	unsigned int no_input_qs = 1;
	unsigned int i;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "qdioest");

	if (!IS_IQD(card) && !IS_VM_NIC(card)) {
		qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
		if (!qib_parms)
			return -ENOMEM;

		qeth_fill_qib_parms(card, qib_parms);
	}

	in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
	if (card->options.cq == QETH_CQ_ENABLED) {
		in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
		no_input_qs++;
	}

	for (i = 0; i < card->qdio.no_out_queues; i++)
		out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;

	memset(&init_data, 0, sizeof(struct qdio_initialize));
	init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
					    QDIO_QETH_QFMT;
	init_data.qib_param_field_format = 0;
	init_data.qib_param_field = (void *)qib_parms;
	init_data.no_input_qs = no_input_qs;
	init_data.no_output_qs = card->qdio.no_out_queues;
	init_data.input_handler = qeth_qdio_input_handler;
	init_data.output_handler = qeth_qdio_output_handler;
	init_data.irq_poll = qeth_qdio_poll;
	init_data.int_parm = (unsigned long) card;
	init_data.input_sbal_addr_array = in_sbal_ptrs;
	init_data.output_sbal_addr_array = out_sbal_ptrs;

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
			   QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
		rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
				   init_data.no_output_qs);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			goto out;
		}
		rc = qdio_establish(CARD_DDEV(card), &init_data);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			qdio_free(CARD_DDEV(card));
			goto out;
		}
	}

	switch (card->options.cq) {
	case QETH_CQ_ENABLED:
		dev_info(&card->gdev->dev, "Completion Queue support enabled");
		break;
	case QETH_CQ_DISABLED:
		dev_info(&card->gdev->dev, "Completion Queue support disabled");
		break;
	default:
		break;
	}

out:
	kfree(qib_parms);
	return rc;
}
static void qeth_core_free_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "freecrd");

	unregister_service_level(&card->qeth_service_level);
	debugfs_remove_recursive(card->debugfs);
	qeth_put_cmd(card->read_cmd);
	destroy_workqueue(card->event_wq);
	dev_set_drvdata(&card->gdev->dev, NULL);
	kfree(card);
}
static void qeth_trace_features(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "features");
	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
	QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
	QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
		      sizeof(card->info.diagass_support));
}
static struct ccw_device_id qeth_ids[] = {
	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
					.driver_info = QETH_CARD_TYPE_OSD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
					.driver_info = QETH_CARD_TYPE_IQD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
					.driver_info = QETH_CARD_TYPE_OSM},
#ifdef CONFIG_QETH_OSX
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
					.driver_info = QETH_CARD_TYPE_OSX},
#endif
	{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);
static struct ccw_driver qeth_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ids = qeth_ids,
	.probe = ccwgroup_probe_ccwdev,
	.remove = ccwgroup_remove_ccwdev,
};
static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
{
	int retries = 3;
	int rc;

	QETH_CARD_TEXT(card, 2, "hrdsetup");
	atomic_set(&card->force_alloc_skb, 0);
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		return rc;
retry:
	if (retries < 3)
		QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
				 CARD_DEVID(card));
	rc = qeth_qdio_clear_card(card, !IS_IQD(card));
	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));

	rc = qeth_start_channel(&card->read);
	if (rc)
		goto retriable;
	rc = qeth_start_channel(&card->write);
	if (rc)
		goto retriable;
	rc = qeth_start_channel(&card->data);
	if (rc)
		goto retriable;
retriable:
	if (rc == -ERESTARTSYS) {
		QETH_CARD_TEXT(card, 2, "break1");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}

	qeth_determine_capabilities(card);
	qeth_read_ccw_conf_data(card);
	qeth_idx_init(card);

	rc = qeth_idx_activate_read_channel(card);
	if (rc == -EINTR) {
		QETH_CARD_TEXT(card, 2, "break2");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}

	rc = qeth_idx_activate_write_channel(card);
	if (rc == -EINTR) {
		QETH_CARD_TEXT(card, 2, "break3");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
	card->read_or_write_problem = 0;
	rc = qeth_mpc_initialize(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out;
	}

	rc = qeth_send_startlan(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		if (rc == -ENETDOWN) {
			dev_warn(&card->gdev->dev, "The LAN is offline\n");
			*carrier_ok = false;
		} else {
			goto out;
		}
	} else {
		*carrier_ok = true;
	}

	card->options.ipa4.supported = 0;
	card->options.ipa6.supported = 0;
	card->options.adp.supported = 0;
	card->options.sbp.supported_funcs = 0;
	card->info.diagass_support = 0;
	rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
	if (rc == -ENOMEM)
		goto out;
	if (qeth_is_supported(card, IPA_IPV6)) {
		rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
		if (rc == -ENOMEM)
			goto out;
	}
	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
		rc = qeth_query_setadapterparms(card);
		if (rc < 0) {
			QETH_CARD_TEXT_(card, 2, "7err%d", rc);
			goto out;
		}
	}
	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
		rc = qeth_query_setdiagass(card);
		if (rc < 0) {
			QETH_CARD_TEXT_(card, 2, "8err%d", rc);
			goto out;
		}
	}

	qeth_trace_features(card);

	if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
	    (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
		card->info.hwtrap = 0;

	if (card->options.isolation != ISOLATION_MODE_NONE) {
		rc = qeth_setadpparms_set_access_ctrl(card,
						      card->options.isolation);
		if (rc)
			goto out;
	}

	qeth_init_link_info(card);

	rc = qeth_init_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "9err%d", rc);
		goto out;
	}

	return 0;
out:
	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
		"an error on the device\n");
	QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
			 CARD_DEVID(card), rc);
	return rc;
}
static int qeth_set_online(struct qeth_card *card,
			   const struct qeth_discipline *disc)
{
	bool carrier_ok;
	int rc;

	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 2, "setonlin");

	rc = qeth_hardsetup_card(card, &carrier_ok);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
		rc = -ENODEV;
		goto err_hardsetup;
	}

	qeth_print_status_message(card);

	if (card->dev->reg_state != NETREG_REGISTERED)
		/* no need for locking / error handling at this early stage: */
		qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));

	rc = disc->set_online(card, carrier_ok);
	if (rc)
		goto err_online;

	/* let user_space know that device is online */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);

	mutex_unlock(&card->conf_mutex);
	return 0;

err_online:
err_hardsetup:
	qeth_qdio_clear_card(card, 0);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);

	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));

	mutex_unlock(&card->conf_mutex);
	return rc;
}
int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
		     bool resetting)
{
	int rc, rc2, rc3;

	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 3, "setoffl");

	if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
		card->info.hwtrap = 1;
	}

	/* cancel any stalled cmd that might block the rtnl: */
	qeth_clear_ipacmd_list(card);

	rtnl_lock();
	card->info.open_when_online = card->dev->flags & IFF_UP;
	dev_close(card->dev);
	netif_device_detach(card->dev);
	netif_carrier_off(card->dev);
	rtnl_unlock();

	cancel_work_sync(&card->rx_mode_work);

	disc->set_offline(card);

	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);
	card->info.promisc_mode = 0;

	rc  = qeth_stop_channel(&card->data);
	rc2 = qeth_stop_channel(&card->write);
	rc3 = qeth_stop_channel(&card->read);
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
	qdio_free(CARD_DDEV(card));

	/* let user_space know that device is offline */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);

	mutex_unlock(&card->conf_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_set_offline);
static int qeth_do_reset(void *data)
{
	const struct qeth_discipline *disc;
	struct qeth_card *card = data;
	int rc;

	/* Lock-free, other users will block until we are done. */
	disc = card->discipline;

	QETH_CARD_TEXT(card, 2, "recover1");
	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
		return 0;
	QETH_CARD_TEXT(card, 2, "recover2");
	dev_warn(&card->gdev->dev,
		 "A recovery process has been started for the device\n");

	qeth_set_offline(card, disc, true);
	rc = qeth_set_online(card, disc);
	if (!rc) {
		dev_info(&card->gdev->dev,
			 "Device successfully recovered!\n");
	} else {
		qeth_set_offline(card, disc, true);
		ccwgroup_set_offline(card->gdev, false);
		dev_warn(&card->gdev->dev,
			 "The qeth device driver failed to recover an error on the device\n");
	}
	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
	return 0;
}
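
/*
 * Note: qeth_do_reset() runs in the QETH_RECOVER_THREAD context started
 * via qeth_schedule_recovery().  Recovery is simply a full offline/online
 * cycle against the current discipline; if the online step fails, the
 * ccwgroup device is forced offline rather than left half-initialized.
 */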
#if IS_ENABLED(CONFIG_QETH_L3)
static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
				struct qeth_hdr *hdr)
{
	struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
	struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
	struct net_device *dev = skb->dev;

	if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
		dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
				"FAKELL", skb->len);
		return;
	}

	if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
		u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
							     ETH_P_IP;
		unsigned char tg_addr[ETH_ALEN];

		skb_reset_network_header(skb);
		switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
		case QETH_CAST_MULTICAST:
			if (prot == ETH_P_IP)
				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
			else
				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
			QETH_CARD_STAT_INC(card, rx_multicast);
			break;
		case QETH_CAST_BROADCAST:
			ether_addr_copy(tg_addr, dev->broadcast);
			QETH_CARD_STAT_INC(card, rx_multicast);
			break;
		default:
			if (card->options.sniffer)
				skb->pkt_type = PACKET_OTHERHOST;
			ether_addr_copy(tg_addr, dev->dev_addr);
		}

		if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
			dev_hard_header(skb, dev, prot, tg_addr,
					&l3_hdr->next_hop.rx.src_mac, skb->len);
		else
			dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
					skb->len);
	}

	/* copy VLAN tag from hdr into skb */
	if (!card->options.sniffer &&
	    (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
				  QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
		u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
				l3_hdr->vlan_id :
				l3_hdr->next_hop.rx.vlan_id;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
	}
}
#endif
static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
			     bool uses_frags, bool is_cso)
{
	struct napi_struct *napi = &card->napi;

	if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		QETH_CARD_STAT_INC(card, rx_skb_csum);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
	QETH_CARD_STAT_INC(card, rx_packets);
	if (skb_is_nonlinear(skb)) {
		QETH_CARD_STAT_INC(card, rx_sg_skbs);
		QETH_CARD_STAT_ADD(card, rx_sg_frags,
				   skb_shinfo(skb)->nr_frags);
	}

	if (uses_frags) {
		napi_gro_frags(napi);
	} else {
		skb->protocol = eth_type_trans(skb, skb->dev);
		napi_gro_receive(napi, skb);
	}
}
static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
{
	struct page *page = virt_to_page(data);
	unsigned int next_frag;

	next_frag = skb_shinfo(skb)->nr_frags;

	skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
			data_len);
}
static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
{
	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
}
static int qeth_extract_skb(struct qeth_card *card,
			    struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
			    int *__offset)
{
	struct qeth_priv *priv = netdev_priv(card->dev);
	struct qdio_buffer *buffer = qethbuffer->buffer;
	struct napi_struct *napi = &card->napi;
	struct qdio_buffer_element *element;
	unsigned int linear_len = 0;
	bool uses_frags = false;
	int offset = *__offset;
	bool use_rx_sg = false;
	unsigned int headroom;
	struct qeth_hdr *hdr;
	struct sk_buff *skb;
	int skb_len = 0;
	bool is_cso;

	element = &buffer->element[*element_no];

next_packet:
	/* qeth_hdr must not cross element boundaries */
	while (element->length < offset + sizeof(struct qeth_hdr)) {
		if (qeth_is_last_sbale(element))
			return -ENODATA;
		element++;
		offset = 0;
	}

	hdr = phys_to_virt(element->addr) + offset;
	offset += sizeof(*hdr);
	skb = NULL;

	switch (hdr->hdr.l2.id) {
	case QETH_HEADER_TYPE_LAYER2:
		skb_len = hdr->hdr.l2.pkt_length;
		is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;

		linear_len = ETH_HLEN;
		headroom = 0;
		break;
	case QETH_HEADER_TYPE_LAYER3:
		skb_len = hdr->hdr.l3.length;
		is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;

		if (!IS_LAYER3(card)) {
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
			goto walk_packet;
		}

		if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
			linear_len = ETH_HLEN;
			headroom = 0;
			break;
		}

		if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
			linear_len = sizeof(struct ipv6hdr);
		else
			linear_len = sizeof(struct iphdr);
		headroom = ETH_HLEN;
		break;
	default:
		if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
			QETH_CARD_STAT_INC(card, rx_frame_errors);
		else
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);

		/* Can't determine packet length, drop the whole buffer. */
		return -EPROTONOSUPPORT;
	}

	if (skb_len < linear_len) {
		QETH_CARD_STAT_INC(card, rx_dropped_runt);
		goto walk_packet;
	}

	use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
		    (skb_len > READ_ONCE(priv->rx_copybreak) &&
		     !atomic_read(&card->force_alloc_skb));

	if (use_rx_sg) {
		/* QETH_CQ_ENABLED only: */
		if (qethbuffer->rx_skb &&
		    skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
			skb = qethbuffer->rx_skb;
			qethbuffer->rx_skb = NULL;
			goto use_skb;
		}

		skb = napi_get_frags(napi);
		if (!skb) {
			/* -ENOMEM, no point in falling back further. */
			QETH_CARD_STAT_INC(card, rx_dropped_nomem);
			goto walk_packet;
		}

		if (skb_tailroom(skb) >= linear_len + headroom) {
			uses_frags = true;
			goto use_skb;
		}

		netdev_info_once(card->dev,
				 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
				 linear_len + headroom, skb_tailroom(skb));
		/* Shouldn't happen. Don't optimize, fall back to linear skb. */
	}

	linear_len = skb_len;
	skb = napi_alloc_skb(napi, linear_len + headroom);
	if (!skb) {
		QETH_CARD_STAT_INC(card, rx_dropped_nomem);
		goto walk_packet;
	}

use_skb:
	if (headroom)
		skb_reserve(skb, headroom);
walk_packet:
	while (skb_len) {
		int data_len = min(skb_len, (int)(element->length - offset));
		char *data = phys_to_virt(element->addr) + offset;

		skb_len -= data_len;
		offset += data_len;

		/* Extract data from current element: */
		if (skb && data_len) {
			if (linear_len) {
				unsigned int copy_len;

				copy_len = min_t(unsigned int, linear_len,
						 data_len);

				skb_put_data(skb, data, copy_len);
				linear_len -= copy_len;
				data_len -= copy_len;
				data += copy_len;
			}

			if (data_len)
				qeth_create_skb_frag(skb, data, data_len);
		}

		/* Step forward to next element: */
		if (skb_len) {
			if (qeth_is_last_sbale(element)) {
				QETH_CARD_TEXT(card, 4, "unexeob");
				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
				if (skb) {
					if (uses_frags)
						napi_free_frags(napi);
					else
						kfree_skb(skb);
					QETH_CARD_STAT_INC(card,
							   rx_length_errors);
				}
				return -EMSGSIZE;
			}
			element++;
			offset = 0;
		}
	}

	/* This packet was skipped, go get another one: */
	if (!skb)
		goto next_packet;

	*element_no = element - &buffer->element[0];
	*__offset = offset;

#if IS_ENABLED(CONFIG_QETH_L3)
	if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER3)
		qeth_l3_rebuild_skb(card, skb, hdr);
#endif

	qeth_receive_skb(card, skb, uses_frags, is_cso);
	return 0;
}
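
/*
 * RX allocation strategy: small packets (up to priv->rx_copybreak) are
 * copied into a freshly allocated linear skb so the DMA buffer can be
 * recycled immediately; larger packets are mapped as page frags via
 * napi_get_frags().  With completion queueing enabled the code always
 * takes the SG path.
 */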
static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
				      struct qeth_qdio_buffer *buf, bool *done)
{
	unsigned int work_done = 0;

	while (budget) {
		if (qeth_extract_skb(card, buf, &card->rx.buf_element,
				     &card->rx.e_offset)) {
			*done = true;
			break;
		}

		work_done++;
		budget--;
	}

	return work_done;
}
static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
{
	struct qeth_rx *ctx = &card->rx;
	unsigned int work_done = 0;

	while (budget > 0) {
		struct qeth_qdio_buffer *buffer;
		unsigned int skbs_done = 0;
		bool done = false;

		/* Fetch completed RX buffers: */
		if (!card->rx.b_count) {
			card->rx.qdio_err = 0;
			card->rx.b_count = qdio_inspect_queue(CARD_DDEV(card),
							      0, true,
							      &card->rx.b_index,
							      &card->rx.qdio_err);
			if (card->rx.b_count <= 0) {
				card->rx.b_count = 0;
				break;
			}
		}

		/* Process one completed RX buffer: */
		buffer = &card->qdio.in_q->bufs[card->rx.b_index];
		if (!(card->rx.qdio_err &&
		      qeth_check_qdio_errors(card, buffer->buffer,
					     card->rx.qdio_err, "qinerr")))
			skbs_done = qeth_extract_skbs(card, budget, buffer,
						      &done);
		else
			done = true;

		work_done += skbs_done;
		budget -= skbs_done;

		if (done) {
			QETH_CARD_STAT_INC(card, rx_bufs);
			qeth_put_buffer_pool_entry(card, buffer->pool_entry);
			buffer->pool_entry = NULL;
			card->rx.b_count--;
			ctx->bufs_refill++;
			ctx->bufs_refill -= qeth_rx_refill_queue(card,
								 ctx->bufs_refill);

			/* Step forward to next buffer: */
			card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
			card->rx.buf_element = 0;
			card->rx.e_offset = 0;
		}
	}

	return work_done;
}
static void qeth_cq_poll(struct qeth_card *card)
{
	unsigned int work_done = 0;

	while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
		unsigned int start, error;
		int completed;

		completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
					       &error);
		if (completed <= 0)
			return;

		qeth_qdio_cq_handler(card, error, 1, start, completed);
		work_done += completed;
	}
}
int qeth_poll(struct napi_struct *napi, int budget)
{
	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
	unsigned int work_done;

	work_done = qeth_rx_poll(card, budget);

	if (qeth_use_tx_irqs(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		/* Check for TX completions: */
		qeth_for_each_output_queue(card, queue, i) {
			if (!qeth_out_queue_is_empty(queue))
				napi_schedule(&queue->napi);
		}
	}

	if (card->options.cq == QETH_CQ_ENABLED)
		qeth_cq_poll(card);

	if (budget) {
		struct qeth_rx *ctx = &card->rx;

		/* Process any substantial refill backlog: */
		ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);

		/* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
		if (work_done >= budget)
			return work_done;
	}

	if (napi_complete_done(napi, work_done) &&
	    qdio_start_irq(CARD_DDEV(card)))
		napi_schedule(napi);

	return work_done;
}
EXPORT_SYMBOL_GPL(qeth_poll);
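
/*
 * Note: the napi_complete_done()/qdio_start_irq() pairing above closes
 * the classic re-arm race: if new work arrived between the final poll
 * and re-enabling the QDIO interrupt, qdio_start_irq() reports it and
 * the NAPI instance is rescheduled immediately instead of stalling.
 */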
static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
				 unsigned int bidx, unsigned int qdio_error,
				 int budget)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
	u8 sflags = buffer->buffer->element[15].sflags;
	struct qeth_card *card = queue->card;
	bool error = !!qdio_error;

	if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
		struct qaob *aob = buffer->aob;
		struct qeth_qaob_priv1 *priv;
		enum iucv_tx_notify notify;

		if (!aob) {
			netdev_WARN_ONCE(card->dev,
					 "Pending TX buffer %#x without QAOB on TX queue %u\n",
					 bidx, queue->queue_no);
			qeth_schedule_recovery(card);
			return;
		}

		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);

		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		/* QAOB hasn't completed yet: */
		if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) {
			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);

			/* Prepare the queue slot for immediate re-use: */
			qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
			if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) {
				QETH_CARD_TEXT(card, 2, "outofbuf");
				qeth_schedule_recovery(card);
			}

			list_add(&buffer->list_entry, &queue->pending_bufs);
			/* Skip clearing the buffer: */
			return;
		}

		/* QAOB already completed: */
		notify = qeth_compute_cq_notification(aob->aorc, 0);
		qeth_notify_skbs(queue, buffer, notify);
		error = !!aob->aorc;
		memset(aob, 0, sizeof(*aob));
	} else if (card->options.cq == QETH_CQ_ENABLED) {
		qeth_notify_skbs(queue, buffer,
				 qeth_compute_cq_notification(sflags, 0));
	}

	qeth_clear_output_buffer(queue, buffer, error, budget);
}
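
/*
 * Note: for IQD devices a TX buffer can complete in two steps.  A
 * QDIO_ERROR_SLSB_PENDING completion means the QAOB is still owned by
 * the hardware; the xchg() on priv->state above resolves the race
 * between this TX-poll path and the asynchronous QAOB completion
 * delivered through the completion queue, so the buffer is either
 * parked on pending_bufs or finished right here.
 */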
static int qeth_tx_poll(struct napi_struct *napi, int budget)
{
	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
	unsigned int queue_no = queue->queue_no;
	struct qeth_card *card = queue->card;
	struct net_device *dev = card->dev;
	unsigned int work_done = 0;
	struct netdev_queue *txq;

	if (IS_IQD(card))
		txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
	else
		txq = netdev_get_tx_queue(dev, queue_no);

	while (1) {
		unsigned int start, error, i;
		unsigned int packets = 0;
		unsigned int bytes = 0;
		int completed;

		qeth_tx_complete_pending_bufs(card, queue, false, budget);

		if (qeth_out_queue_is_empty(queue)) {
			napi_complete(napi);
			return 0;
		}

		/* Give the CPU a breather: */
		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
			QETH_TXQ_STAT_INC(queue, completion_yield);
			if (napi_complete_done(napi, 0))
				napi_schedule(napi);
			return 0;
		}

		completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
					       &start, &error);
		if (completed <= 0) {
			/* Ensure we see TX completion for pending work: */
			if (napi_complete_done(napi, 0) &&
			    !atomic_read(&queue->set_pci_flags_count))
				qeth_tx_arm_timer(queue, queue->rescan_usecs);
			return 0;
		}

		for (i = start; i < start + completed; i++) {
			struct qeth_qdio_out_buffer *buffer;
			unsigned int bidx = QDIO_BUFNR(i);

			buffer = queue->bufs[bidx];
			packets += buffer->frames;
			bytes += buffer->bytes;

			qeth_handle_send_error(card, buffer, error);
			if (IS_IQD(card))
				qeth_iqd_tx_complete(queue, bidx, error, budget);
			else
				qeth_clear_output_buffer(queue, buffer, error,
							 budget);
		}

		atomic_sub(completed, &queue->used_buffers);
		work_done += completed;
		if (IS_IQD(card))
			netdev_tx_completed_queue(txq, packets, bytes);
		else
			qeth_check_outbound_queue(queue);

		/* xmit may have observed the full-condition, but not yet
		 * stopped the txq. In which case the code below won't trigger.
		 * So before returning, xmit will re-check the txq's fill level
		 * and wake it up if needed.
		 */
		if (netif_tx_queue_stopped(txq) &&
		    !qeth_out_queue_is_full(queue))
			netif_tx_wake_queue(txq);
	}
}
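
/*
 * Note: only IQD devices use BQL-style accounting in the loop above
 * (netdev_tx_completed_queue()); for the other card types the driver
 * instead re-evaluates the queue's fill level via
 * qeth_check_outbound_queue().
 */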
static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	return cmd->hdr.return_code;
}
static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
					struct qeth_reply *reply,
					unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipa_caps *caps = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	caps->supported = cmd->data.setassparms.data.caps.supported;
	caps->enabled = cmd->data.setassparms.data.caps.enabled;
	return 0;
}
int qeth_setassparms_cb(struct qeth_card *card,
			struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	QETH_CARD_TEXT(card, 4, "defadpcb");

	if (cmd->hdr.return_code)
		return -EIO;

	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4.enabled = cmd->hdr.assists.enabled;
	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6.enabled = cmd->hdr.assists.enabled;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
						 enum qeth_ipa_funcs ipa_func,
						 u16 cmd_code,
						 unsigned int data_length,
						 enum qeth_prot_versions prot)
{
	struct qeth_ipacmd_setassparms *setassparms;
	struct qeth_ipacmd_setassparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 4, "getasscm");
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
				 data_length +
				 offsetof(struct qeth_ipacmd_setassparms,
					  data));
	if (!iob)
		return NULL;

	setassparms = &__ipa_cmd(iob)->data.setassparms;
	setassparms->assist_no = ipa_func;

	hdr = &setassparms->hdr;
	hdr->length = sizeof(*hdr) + data_length;
	hdr->command_code = cmd_code;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
				      enum qeth_ipa_funcs ipa_func,
				      u16 cmd_code, u32 *data,
				      enum qeth_prot_versions prot)
{
	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
	if (!iob)
		return -ENOMEM;

	if (data)
		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
static void qeth_unregister_dbf_views(void)
{
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		debug_unregister(qeth_dbf[x].id);
		qeth_dbf[x].id = NULL;
	}
}
void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
{
	char dbf_txt_buf[32];
	va_list args;

	if (!debug_level_enabled(id, level))
		return;
	va_start(args, fmt);
	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
	va_end(args);
	debug_text_event(id, level, dbf_txt_buf);
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
static int qeth_register_dbf_views(void)
{
	int ret;
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		/* register the areas */
		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
						qeth_dbf[x].pages,
						qeth_dbf[x].areas,
						qeth_dbf[x].len);
		if (qeth_dbf[x].id == NULL) {
			qeth_unregister_dbf_views();
			return -ENOMEM;
		}

		/* register a view */
		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
		if (ret) {
			qeth_unregister_dbf_views();
			return ret;
		}

		/* set a passing level */
		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
	}

	return 0;
}
static DEFINE_MUTEX(qeth_mod_mutex);	/* for synchronized module loading */

int qeth_setup_discipline(struct qeth_card *card,
			  enum qeth_discipline_id discipline)
{
	int rc;

	mutex_lock(&qeth_mod_mutex);
	switch (discipline) {
	case QETH_DISCIPLINE_LAYER3:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l3_discipline), "qeth_l3");
		break;
	case QETH_DISCIPLINE_LAYER2:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l2_discipline), "qeth_l2");
		break;
	default:
		break;
	}
	mutex_unlock(&qeth_mod_mutex);

	if (!card->discipline) {
		dev_err(&card->gdev->dev, "There is no kernel module to "
			"support discipline %d\n", discipline);
		return -EINVAL;
	}

	rc = card->discipline->setup(card->gdev);
	if (rc) {
		if (discipline == QETH_DISCIPLINE_LAYER2)
			symbol_put(qeth_l2_discipline);
		else
			symbol_put(qeth_l3_discipline);
		card->discipline = NULL;

		return rc;
	}

	card->options.layer = discipline;
	return 0;
}
void qeth_remove_discipline(struct qeth_card *card)
{
	card->discipline->remove(card->gdev);

	if (IS_LAYER2(card))
		symbol_put(qeth_l2_discipline);
	else
		symbol_put(qeth_l3_discipline);
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
	card->discipline = NULL;
}
static const struct device_type qeth_generic_devtype = {
	.name = "qeth_generic",
};

#define DBF_NAME_LEN	20

struct qeth_dbf_entry {
	char dbf_name[DBF_NAME_LEN];
	debug_info_t *dbf_info;
	struct list_head dbf_list;
};

static LIST_HEAD(qeth_dbf_list);
static DEFINE_MUTEX(qeth_dbf_list_mutex);
static debug_info_t *qeth_get_dbf_entry(char *name)
{
	struct qeth_dbf_entry *entry;
	debug_info_t *rc = NULL;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
		if (strcmp(entry->dbf_name, name) == 0) {
			rc = entry->dbf_info;
			break;
		}
	}
	mutex_unlock(&qeth_dbf_list_mutex);
	return rc;
}
static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
{
	struct qeth_dbf_entry *new_entry;

	card->debug = debug_register(name, 2, 1, 8);
	if (!card->debug) {
		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
		goto err;
	}
	if (debug_register_view(card->debug, &debug_hex_ascii_view))
		goto err_dbg;
	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
	if (!new_entry)
		goto err_dbg;
	strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
	new_entry->dbf_info = card->debug;
	mutex_lock(&qeth_dbf_list_mutex);
	list_add(&new_entry->dbf_list, &qeth_dbf_list);
	mutex_unlock(&qeth_dbf_list_mutex);

	return 0;

err_dbg:
	debug_unregister(card->debug);
err:
	return -ENOMEM;
}
static void qeth_clear_dbf_list(void)
{
	struct qeth_dbf_entry *entry, *tmp;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
		list_del(&entry->dbf_list);
		debug_unregister(entry->dbf_info);
		kfree(entry);
	}
	mutex_unlock(&qeth_dbf_list_mutex);
}
static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
	struct net_device *dev;
	struct qeth_priv *priv;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
				       ether_setup, QETH_MAX_OUT_QUEUES, 1);
		break;
	case QETH_CARD_TYPE_OSM:
		dev = alloc_etherdev(sizeof(*priv));
		break;
	default:
		dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
	}

	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	priv->rx_copybreak = QETH_RX_COPYBREAK;
	priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;

	dev->ml_priv = card;
	dev->watchdog_timeo = QETH_TX_TIMEOUT;
	dev->min_mtu = 576;
	/* initialized when device first goes online: */
	dev->max_mtu = 0;
	dev->mtu = 0;
	SET_NETDEV_DEV(dev, &card->gdev->dev);
	netif_carrier_off(dev);

	dev->ethtool_ops = &qeth_ethtool_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->hw_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_SG;
	if (IS_IQD(card))
		dev->features |= NETIF_F_SG;

	return dev;
}
struct net_device *qeth_clone_netdev(struct net_device *orig)
{
	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);

	if (!clone)
		return NULL;

	clone->dev_port = orig->dev_port;
	return clone;
}
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
	struct device *dev;
	int rc;
	enum qeth_discipline_id enforced_disc;
	char dbf_name[DBF_NAME_LEN];

	QETH_DBF_TEXT(SETUP, 2, "probedev");

	dev = &gdev->dev;
	if (!get_device(dev))
		return -ENODEV;

	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));

	card = qeth_alloc_card(gdev);
	if (!card) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
		rc = -ENOMEM;
		goto err_dev;
	}

	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
		 dev_name(&gdev->dev));
	card->debug = qeth_get_dbf_entry(dbf_name);
	if (!card->debug) {
		rc = qeth_add_dbf_entry(card, dbf_name);
		if (rc)
			goto err_card;
	}

	qeth_setup_card(card);
	card->dev = qeth_alloc_netdev(card);
	if (!card->dev) {
		rc = -ENOMEM;
		goto err_card;
	}

	qeth_determine_capabilities(card);
	qeth_set_blkt_defaults(card);

	card->qdio.in_q = qeth_alloc_qdio_queue();
	if (!card->qdio.in_q) {
		rc = -ENOMEM;
		goto err_rx_queue;
	}

	card->qdio.no_out_queues = card->dev->num_tx_queues;
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		goto err_chp_desc;

	gdev->dev.groups = qeth_dev_groups;

	enforced_disc = qeth_enforce_discipline(card);
	switch (enforced_disc) {
	case QETH_DISCIPLINE_UNDETERMINED:
		gdev->dev.type = &qeth_generic_devtype;
		break;
	default:
		card->info.layer_enforced = true;
		/* It's so early that we don't need the discipline_mutex yet. */
		rc = qeth_setup_discipline(card, enforced_disc);
		if (rc)
			goto err_setup_disc;

		break;
	}

	return 0;

err_setup_disc:
err_chp_desc:
	qeth_free_qdio_queue(card->qdio.in_q);
err_rx_queue:
	free_netdev(card->dev);
err_card:
	qeth_core_free_card(card);
err_dev:
	put_device(dev);
	return rc;
}
static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 2, "removedv");

	mutex_lock(&card->discipline_mutex);
	if (card->discipline)
		qeth_remove_discipline(card);
	mutex_unlock(&card->discipline_mutex);

	qeth_free_qdio_queues(card);

	qeth_free_qdio_queue(card->qdio.in_q);
	free_netdev(card->dev);
	qeth_core_free_card(card);
	put_device(&gdev->dev);
}
static int qeth_core_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
	enum qeth_discipline_id def_discipline;

	mutex_lock(&card->discipline_mutex);
	if (!card->discipline) {
		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
						QETH_DISCIPLINE_LAYER2;
		rc = qeth_setup_discipline(card, def_discipline);
		if (rc)
			goto err;
	}

	rc = qeth_set_online(card, card->discipline);

err:
	mutex_unlock(&card->discipline_mutex);
	return rc;
}
static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc;

	mutex_lock(&card->discipline_mutex);
	rc = qeth_set_offline(card, card->discipline, false);
	mutex_unlock(&card->discipline_mutex);

	return rc;
}
static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qdio_free(CARD_DDEV(card));
}
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
			   size_t count)
{
	int err;

	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
				  buf);

	return err ? err : count;
}
static DRIVER_ATTR_WO(group);

static struct attribute *qeth_drv_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};
static struct attribute_group qeth_drv_attr_group = {
	.attrs = qeth_drv_attrs,
};
static const struct attribute_group *qeth_drv_attr_groups[] = {
	&qeth_drv_attr_group,
	NULL,
};

static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.driver = {
		.groups = qeth_drv_attr_groups,
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ccw_driver = &qeth_ccw_driver,
	.setup = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
};
int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
		    !IS_VM_NIC(card))
			return 1;
		return 0;
	case SIOC_QETH_QUERY_OAT:
		rc = qeth_query_oat_command(card, data);
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_siocdevprivate);
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);
static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
			      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u32 *features = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	*features = cmd->data.setassparms.data.flags_32bit;
	return 0;
}
static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			     enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
						 NULL, prot);
}

static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			    enum qeth_prot_versions prot, u8 *lp2lp)
{
	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	u32 features;
	int rc;

	/* some L3 HW requires combined L3+L4 csum offload: */
	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
		required_features |= QETH_IPA_CHECKSUM_IP_HDR;

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
				       prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
	if (rc)
		return rc;

	if ((required_features & features) != required_features) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(flags_32bit),
				       prot);
	if (!iob) {
		qeth_set_csum_off(card, cstype, prot);
		return -ENOMEM;
	}

	if (features & QETH_IPA_CHECKSUM_LP2LP)
		required_features |= QETH_IPA_CHECKSUM_LP2LP;
	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_csum_off(card, cstype, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, required_features) ||
	    !qeth_ipa_caps_enabled(&caps, required_features)) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);

	if (lp2lp)
		*lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);

	return 0;
}
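
/* The function above negotiates checksum offload in three IPA steps:
 * ASS_START reports which checksum types the assist supports; if the
 * required TCP/UDP (and, for L3 IPv4 outbound, IP header) flags are
 * missing, the assist is switched back off. ASS_ENABLE then requests
 * exactly the required flag set - including LP2LP when offered - and the
 * returned caps must both support and enable every required flag, or the
 * assist is again rolled back and -EOPNOTSUPP is returned.
 */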

static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
			     enum qeth_prot_versions prot, u8 *lp2lp)
{
	return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
		    qeth_set_csum_off(card, cstype, prot);
}

static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_tso_start_data *tso_data = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	tso_data->mss = cmd->data.setassparms.data.tso.mss;
	tso_data->supported = cmd->data.setassparms.data.tso.supported;
	return 0;
}

static int qeth_set_tso_off(struct qeth_card *card,
			    enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
						 IPA_CMD_ASS_STOP, NULL, prot);
}

static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
	if (rc)
		return rc;

	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(caps), prot);
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
}

static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
			    enum qeth_prot_versions prot)
{
	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
}

static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
					    QETH_PROT_IPV4, NULL);
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* no/one Offload Assist available, so the rc is trivial */
		return rc_ipv4;

	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
				    QETH_PROT_IPV6, NULL);

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}
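
/* Worked example for the rc logic above: enabling RX csum on a card with
 * only the IPv6 assist leaves rc_ipv4 at -EOPNOTSUPP, but a successful
 * rc_ipv6 makes the overall call succeed - one active assist is enough to
 * justify NETIF_F_RXCSUM. On disable the rule inverts: both assists must
 * be off before success is reported.
 */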

/**
 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
 * @dev:	a net_device
 */
void qeth_enable_hw_features(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t features;

	features = dev->features;
	/* force-off any feature that might need an IPA sequence.
	 * netdev_update_features() will restart them.
	 */
	dev->features &= ~dev->hw_features;
	/* toggle VLAN filter, so that VIDs are re-programmed: */
	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	netdev_update_features(dev);
	if (features != dev->features)
		dev_warn(&card->gdev->dev,
			 "Device recovery failed to restore all offload features\n");
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);

static void qeth_check_restricted_features(struct qeth_card *card,
					   netdev_features_t changed,
					   netdev_features_t actual)
{
	netdev_features_t ipv6_features = NETIF_F_TSO6;
	netdev_features_t ipv4_features = NETIF_F_TSO;

	if (!card->info.has_lp2lp_cso_v6)
		ipv6_features |= NETIF_F_IPV6_CSUM;
	if (!card->info.has_lp2lp_cso_v4)
		ipv4_features |= NETIF_F_IP_CSUM;

	if ((changed & ipv6_features) && !(actual & ipv6_features))
		qeth_flush_local_addrs6(card);
	if ((changed & ipv4_features) && !(actual & ipv4_features))
		qeth_flush_local_addrs4(card);
}

int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "setfeat");
	QETH_CARD_HEX(card, 2, &features, sizeof(features));

	if ((changed & NETIF_F_IP_CSUM)) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
				       &card->info.has_lp2lp_cso_v4);
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
				       &card->info.has_lp2lp_cso_v6);
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_TSO;
	}
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}

	qeth_check_restricted_features(card, dev->features ^ features,
				       dev->features ^ changed);

	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return rc;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);
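
/* XOR bookkeeping in qeth_set_features() above: 'changed' starts as the set
 * of feature bits that must flip. Each failed toggle clears its bit
 * (changed ^= FLAG runs only when rc != 0), so 'changed' ends up holding
 * exactly the flips that succeeded. If that still equals the requested set,
 * everything worked; otherwise only the successful flips are committed to
 * dev->features and -EIO tells the stack the request was partially applied.
 */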

netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);

netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	/* Traffic with local next-hop is not eligible for some offloads: */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
		netdev_features_t restricted = 0;

		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
			restricted |= NETIF_F_ALL_TSO;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			if (!card->info.has_lp2lp_cso_v4)
				restricted |= NETIF_F_IP_CSUM;

			if (restricted && qeth_next_hop_is_local_v4(card, skb))
				features &= ~restricted;
			break;
		case htons(ETH_P_IPV6):
			if (!card->info.has_lp2lp_cso_v6)
				restricted |= NETIF_F_IPV6_CSUM;

			if (restricted && qeth_next_hop_is_local_v6(card, skb))
				features &= ~restricted;
			break;
		default:
			break;
		}
	}

	/* GSO segmentation builds skbs with
	 *	a (small) linear part for the headers, and
	 *	page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);
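
/* Illustrative numbers for the linearization check above: with 64 bytes of
 * headroom, 66 bytes of headers (doffset) and an MSS of 1400 (hsize),
 * SKB_DATA_ALIGN(64 + 66 + 1400) stays well below SKB_MAX_HEAD(0) on 4K
 * pages, so NETIF_F_SG is cleared and each GSO segment becomes a single
 * linear buffer element instead of a header element plus frags.
 */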

void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_length_errors +
			   card->stats.rx_frame_errors +
			   card->stats.rx_fifo_errors;
	stats->rx_dropped = card->stats.rx_dropped_nomem +
			    card->stats.rx_dropped_notsupp +
			    card->stats.rx_dropped_runt;
	stats->multicast = card->stats.rx_multicast;
	stats->rx_length_errors = card->stats.rx_length_errors;
	stats->rx_frame_errors = card->stats.rx_frame_errors;
	stats->rx_fifo_errors = card->stats.rx_fifo_errors;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);

#define TC_IQD_UCAST	0
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
				     unsigned int ucast_txqs)
{
	unsigned int prio;

	/* IQD requires mcast traffic to be placed on a dedicated queue, and
	 * qeth_iqd_select_queue() deals with this.
	 * For unicast traffic, we defer the queue selection to the stack.
	 * By installing a trivial prio map that spans over only the unicast
	 * queues, we can encourage the stack to spread the ucast traffic evenly
	 * without selecting the mcast queue.
	 */

	/* One traffic class, spanning over all active ucast queues: */
	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
			    QETH_IQD_MIN_UCAST_TXQ);

	/* Map all priorities to this traffic class: */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
}
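
/* Illustration (assuming the usual IQD layout where QETH_IQD_MCAST_TXQ is
 * queue 0 and QETH_IQD_MIN_UCAST_TXQ is 1): with 4 real TX queues,
 * ucast_txqs is 3 and the single traffic class spans queues 1-3. Since
 * every priority maps to that class, netdev_pick_tx() can never select the
 * reserved mcast queue.
 */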

int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
	struct net_device *dev = card->dev;
	int rc;

	/* Per netif_setup_tc(), adjust the mapping first: */
	if (IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, count - 1);

	rc = netif_set_real_num_tx_queues(dev, count);

	if (rc && IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);

u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	u16 txq;

	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
		return QETH_IQD_MIN_UCAST_TXQ;

	txq = netdev_pick_tx(dev, skb, sb_dev);
	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);

int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethopen");

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	local_bh_disable();
	qeth_for_each_output_queue(card, queue, i) {
		netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
				  QETH_NAPI_WEIGHT);
		napi_enable(&queue->napi);
		napi_schedule(&queue->napi);
	}

	napi_enable(&card->napi);
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);
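
/* The local_bh_disable()/local_bh_enable() bracket in qeth_open() is
 * deliberate: napi_schedule() only raises the NET_RX softirq, and
 * re-enabling bottom halves is what actually runs it on this CPU - hence
 * the "kick-start" comment. Each queue's NAPI thus gets its first poll
 * right after it is wired up.
 */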

int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethstop");

	napi_disable(&card->napi);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	qdio_stop_irq(CARD_DDEV(card));

	/* Quiesce the NAPI instances: */
	qeth_for_each_output_queue(card, queue, i)
		napi_disable(&queue->napi);

	/* Stop .ndo_start_xmit, might still access queue->napi. */
	netif_tx_disable(dev);

	qeth_for_each_output_queue(card, queue, i) {
		del_timer_sync(&queue->timer);
		/* Queues may get re-allocated, so remove the NAPIs. */
		netif_napi_del(&queue->napi);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);

static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}

	qeth_qaob_cache = kmem_cache_create("qeth_qaob",
					    sizeof(struct qaob),
					    sizeof(struct qaob),
					    0, NULL);
	if (!qeth_qaob_cache) {
		rc = -ENOMEM;
		goto qaob_err;
	}

	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qaob_cache);
qaob_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}
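
/* The goto ladder above unwinds in exact reverse order of setup: each label
 * releases only what was successfully initialized before the failure point,
 * the idiomatic kernel pattern for multi-step module init.
 */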

static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qaob_cache);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_info("core functions removed\n");
}

module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");