1 /*
2 * IUCV network driver
3 *
4 * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
6 *
7 * Sysfs integration and all bugs therein by Cornelia Huck
8 * (cornelia.huck@de.ibm.com)
9 *
10 * Documentation used:
11 * the source of the original IUCV driver by:
12 * Stefan Hegewald <hegewald@de.ibm.com>
13 * Hartmut Penner <hpenner@de.ibm.com>
14 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
15 * Martin Schwidefsky (schwidefsky@de.ibm.com)
16 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2, or (at your option)
21 * any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 *
32 */
33
34 #undef DEBUG
35
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/kernel.h>
39 #include <linux/slab.h>
40 #include <linux/errno.h>
41 #include <linux/types.h>
42 #include <linux/interrupt.h>
43 #include <linux/timer.h>
44 #include <linux/bitops.h>
45
46 #include <linux/signal.h>
47 #include <linux/string.h>
48 #include <linux/device.h>
49
50 #include <linux/ip.h>
51 #include <linux/if_arp.h>
52 #include <linux/tcp.h>
53 #include <linux/skbuff.h>
54 #include <linux/ctype.h>
55 #include <net/dst.h>
56
57 #include <asm/io.h>
58 #include <asm/uaccess.h>
59
60 #include <net/iucv/iucv.h>
61 #include "fsm.h"
62
63 MODULE_AUTHOR
64 ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
65 MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
66
67 /**
68 * Debug Facility stuff
69 */
70 #define IUCV_DBF_SETUP_NAME "iucv_setup"
71 #define IUCV_DBF_SETUP_LEN 32
72 #define IUCV_DBF_SETUP_PAGES 2
73 #define IUCV_DBF_SETUP_NR_AREAS 1
74 #define IUCV_DBF_SETUP_LEVEL 3
75
76 #define IUCV_DBF_DATA_NAME "iucv_data"
77 #define IUCV_DBF_DATA_LEN 128
78 #define IUCV_DBF_DATA_PAGES 2
79 #define IUCV_DBF_DATA_NR_AREAS 1
80 #define IUCV_DBF_DATA_LEVEL 2
81
82 #define IUCV_DBF_TRACE_NAME "iucv_trace"
83 #define IUCV_DBF_TRACE_LEN 16
84 #define IUCV_DBF_TRACE_PAGES 4
85 #define IUCV_DBF_TRACE_NR_AREAS 1
86 #define IUCV_DBF_TRACE_LEVEL 3
87
88 #define IUCV_DBF_TEXT(name,level,text) \
89 do { \
90 debug_text_event(iucv_dbf_##name,level,text); \
91 } while (0)
92
93 #define IUCV_DBF_HEX(name,level,addr,len) \
94 do { \
95 debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
96 } while (0)
97
98 DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
99
100 #define IUCV_DBF_TEXT_(name,level,text...) \
101 do { \
102 char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \
103 sprintf(iucv_dbf_txt_buf, text); \
104 debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \
105 put_cpu_var(iucv_dbf_txt_buf); \
106 } while (0)
107
108 #define IUCV_DBF_SPRINTF(name,level,text...) \
109 do { \
110 debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
111 debug_sprintf_event(iucv_dbf_trace, level, text ); \
112 } while (0)
113
114 /**
115 * some more debug stuff
116 */
117 #define IUCV_HEXDUMP16(importance,header,ptr) \
118 PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
119 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
120 *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
121 *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
122 *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
123 *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
124 *(((char*)ptr)+12),*(((char*)ptr)+13), \
125 *(((char*)ptr)+14),*(((char*)ptr)+15)); \
126 PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
127 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
128 *(((char*)ptr)+16),*(((char*)ptr)+17), \
129 *(((char*)ptr)+18),*(((char*)ptr)+19), \
130 *(((char*)ptr)+20),*(((char*)ptr)+21), \
131 *(((char*)ptr)+22),*(((char*)ptr)+23), \
132 *(((char*)ptr)+24),*(((char*)ptr)+25), \
133 *(((char*)ptr)+26),*(((char*)ptr)+27), \
134 *(((char*)ptr)+28),*(((char*)ptr)+29), \
135 *(((char*)ptr)+30),*(((char*)ptr)+31));
136
137 static inline void iucv_hex_dump(unsigned char *buf, size_t len)
138 {
139 size_t i;
140
141 for (i = 0; i < len; i++) {
142 if (i && !(i % 16))
143 printk("\n");
144 printk("%02x ", *(buf + i));
145 }
146 printk("\n");
147 }
148
149 #define PRINTK_HEADER " iucv: " /* for debugging */
150
151 static struct device_driver netiucv_driver = {
152 .name = "netiucv",
153 .bus = &iucv_bus,
154 };
155
156 static int netiucv_callback_connreq(struct iucv_path *,
157 u8 ipvmid[8], u8 ipuser[16]);
158 static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
159 static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
160 static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
161 static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
162 static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
163 static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
164
165 static struct iucv_handler netiucv_handler = {
166 .path_pending = netiucv_callback_connreq,
167 .path_complete = netiucv_callback_connack,
168 .path_severed = netiucv_callback_connrej,
169 .path_quiesced = netiucv_callback_connsusp,
170 .path_resumed = netiucv_callback_connres,
171 .message_pending = netiucv_callback_rx,
172 .message_complete = netiucv_callback_txdone
173 };
174
175 /**
176 * Per connection profiling data
177 */
178 struct connection_profile {
179 unsigned long maxmulti;
180 unsigned long maxcqueue;
181 unsigned long doios_single;
182 unsigned long doios_multi;
183 unsigned long txlen;
184 unsigned long tx_time;
185 struct timespec send_stamp;
186 unsigned long tx_pending;
187 unsigned long tx_max_pending;
188 };
189
190 /**
191 * Representation of one iucv connection
192 */
193 struct iucv_connection {
194 struct list_head list;
195 struct iucv_path *path;
196 struct sk_buff *rx_buff;
197 struct sk_buff *tx_buff;
198 struct sk_buff_head collect_queue;
199 struct sk_buff_head commit_queue;
200 spinlock_t collect_lock;
201 int collect_len;
202 int max_buffsize;
203 fsm_timer timer;
204 fsm_instance *fsm;
205 struct net_device *netdev;
206 struct connection_profile prof;
207 char userid[9];
208 };
209
210 /**
211 * Linked list of all connection structs.
212 */
213 static struct list_head iucv_connection_list =
214 LIST_HEAD_INIT(iucv_connection_list);
215 static rwlock_t iucv_connection_rwlock = RW_LOCK_UNLOCKED;
216
217 /**
218 * Representation of event-data for the
219 * connection state machine.
220 */
221 struct iucv_event {
222 struct iucv_connection *conn;
223 void *data;
224 };
225
226 /**
227 * Private part of the network device structure
228 */
229 struct netiucv_priv {
230 struct net_device_stats stats;
231 unsigned long tbusy;
232 fsm_instance *fsm;
233 struct iucv_connection *conn;
234 struct device *dev;
235 };
236
237 /**
238 * Link level header for a packet.
239 */
240 struct ll_header {
241 u16 next;
242 };
243
244 #define NETIUCV_HDRLEN (sizeof(struct ll_header))
245 #define NETIUCV_BUFSIZE_MAX 32768
246 #define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
247 #define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
248 #define NETIUCV_MTU_DEFAULT 9216
249 #define NETIUCV_QUEUELEN_DEFAULT 50
250 #define NETIUCV_TIMEOUT_5SEC 5000
251
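/*
 * Wire format on an IUCV path: one IUCV message may carry several IP
 * packets back to back.  Each packet is preceded by a struct ll_header
 * whose 'next' field holds the offset (counted from the start of the
 * buffer) of the following ll_header; a header with next == 0 terminates
 * the buffer.  The sketch below is purely illustrative and not built;
 * netiucv_unpack_skb() and conn_action_txdone() are the authoritative
 * implementations of this format.
 */
#if 0	/* illustrative sketch only */
static void example_walk_buffer(u8 *buf)
{
	u16 offset = 0;

	for (;;) {
		struct ll_header *hdr = (struct ll_header *) (buf + offset);

		if (!hdr->next)		/* terminating header */
			break;
		/* payload of this packet:
		 *   start  = buf + offset + NETIUCV_HDRLEN
		 *   length = hdr->next - offset - NETIUCV_HDRLEN
		 */
		offset = hdr->next;
	}
}
#endif
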
252 /**
253 * Compatibility macros for busy handling
254 * of network devices.
255 */
256 static inline void netiucv_clear_busy(struct net_device *dev)
257 {
258 struct netiucv_priv *priv = netdev_priv(dev);
259 clear_bit(0, &priv->tbusy);
260 netif_wake_queue(dev);
261 }
262
263 static inline int netiucv_test_and_set_busy(struct net_device *dev)
264 {
265 struct netiucv_priv *priv = netdev_priv(dev);
266 netif_stop_queue(dev);
267 return test_and_set_bit(0, &priv->tbusy);
268 }
269
270 static u8 iucvMagic[16] = {
271 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
272 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
273 };
274
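/*
 * Note: 0xF0 is the EBCDIC digit '0' and 0x40 is the EBCDIC blank, so
 * iucvMagic is the 16-byte EBCDIC string "0       0       ".  It is the
 * user data both peers present when a path is established; see
 * netiucv_callback_connreq() and conn_action_start().
 */
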
275 /**
276 * Convert an iucv userId to its printable
277 * form (strip whitespace at end).
278 *
279 * @param name An iucv userId
280 *
281 * @returns The printable string (static data!!)
282 */
283 static inline char *netiucv_printname(char *name)
284 {
285 static char tmp[9];
286 char *p = tmp;
287 memcpy(tmp, name, 8);
288 tmp[8] = '\0';
289 while (*p && (!isspace(*p)))
290 p++;
291 *p = '\0';
292 return tmp;
293 }
294
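/*
 * Example (illustrative only): for the blank-padded 8-character z/VM
 * user ID "TCPIP   ", netiucv_printname() returns the trimmed string
 * "TCPIP".
 */
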
295 /**
296 * States of the interface statemachine.
297 */
298 enum dev_states {
299 DEV_STATE_STOPPED,
300 DEV_STATE_STARTWAIT,
301 DEV_STATE_STOPWAIT,
302 DEV_STATE_RUNNING,
303 /**
304 * MUST always be the last element!!
305 */
306 NR_DEV_STATES
307 };
308
309 static const char *dev_state_names[] = {
310 "Stopped",
311 "StartWait",
312 "StopWait",
313 "Running",
314 };
315
316 /**
317 * Events of the interface statemachine.
318 */
319 enum dev_events {
320 DEV_EVENT_START,
321 DEV_EVENT_STOP,
322 DEV_EVENT_CONUP,
323 DEV_EVENT_CONDOWN,
324 /**
325 * MUST always be the last element!!
326 */
327 NR_DEV_EVENTS
328 };
329
330 static const char *dev_event_names[] = {
331 "Start",
332 "Stop",
333 "Connection up",
334 "Connection down",
335 };
336
337 /**
338 * Events of the connection statemachine
339 */
340 enum conn_events {
341 /**
342 * Events, representing callbacks from
343 * lowlevel iucv layer
344 */
345 CONN_EVENT_CONN_REQ,
346 CONN_EVENT_CONN_ACK,
347 CONN_EVENT_CONN_REJ,
348 CONN_EVENT_CONN_SUS,
349 CONN_EVENT_CONN_RES,
350 CONN_EVENT_RX,
351 CONN_EVENT_TXDONE,
352
353 /**
354 * Events, representing error return codes from
355 * calls to lowlevel iucv layer
356 */
357
358 /**
359 * Event, representing timer expiry.
360 */
361 CONN_EVENT_TIMER,
362
363 /**
364 * Events, representing commands from upper levels.
365 */
366 CONN_EVENT_START,
367 CONN_EVENT_STOP,
368
369 /**
370 * MUST always be the last element!!
371 */
372 NR_CONN_EVENTS,
373 };
374
375 static const char *conn_event_names[] = {
376 "Remote connection request",
377 "Remote connection acknowledge",
378 "Remote connection reject",
379 "Connection suspended",
380 "Connection resumed",
381 "Data received",
382 "Data sent",
383
384 "Timer",
385
386 "Start",
387 "Stop",
388 };
389
390 /**
391 * States of the connection statemachine.
392 */
393 enum conn_states {
394 /**
395 * Connection not assigned to any device,
396 * initial state, invalid
397 */
398 CONN_STATE_INVALID,
399
400 /**
401 * Userid assigned but not operating
402 */
403 CONN_STATE_STOPPED,
404
405 /**
406 * Connection registered,
407 * no connection request sent yet,
408 * no connection request received
409 */
410 CONN_STATE_STARTWAIT,
411
412 /**
413 * Connection registered and connection request sent,
414 * no acknowledge and no connection request received yet.
415 */
416 CONN_STATE_SETUPWAIT,
417
418 /**
419 * Connection up and running idle
420 */
421 CONN_STATE_IDLE,
422
423 /**
424 * Data sent, awaiting CONN_EVENT_TXDONE
425 */
426 CONN_STATE_TX,
427
428 /**
429 * Error during registration.
430 */
431 CONN_STATE_REGERR,
432
433 /**
434 * Error during connection setup.
435 */
436 CONN_STATE_CONNERR,
437
438 /**
439 * MUST always be the last element!!
440 */
441 NR_CONN_STATES,
442 };
443
444 static const char *conn_state_names[] = {
445 "Invalid",
446 "Stopped",
447 "StartWait",
448 "SetupWait",
449 "Idle",
450 "TX",
452 "Registration error",
453 "Connect error",
454 };
455
456
457 /**
458 * Debug Facility Stuff
459 */
460 static debug_info_t *iucv_dbf_setup = NULL;
461 static debug_info_t *iucv_dbf_data = NULL;
462 static debug_info_t *iucv_dbf_trace = NULL;
463
464 DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
465
466 static void iucv_unregister_dbf_views(void)
467 {
468 if (iucv_dbf_setup)
469 debug_unregister(iucv_dbf_setup);
470 if (iucv_dbf_data)
471 debug_unregister(iucv_dbf_data);
472 if (iucv_dbf_trace)
473 debug_unregister(iucv_dbf_trace);
474 }
475 static int iucv_register_dbf_views(void)
476 {
477 iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
478 IUCV_DBF_SETUP_PAGES,
479 IUCV_DBF_SETUP_NR_AREAS,
480 IUCV_DBF_SETUP_LEN);
481 iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
482 IUCV_DBF_DATA_PAGES,
483 IUCV_DBF_DATA_NR_AREAS,
484 IUCV_DBF_DATA_LEN);
485 iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
486 IUCV_DBF_TRACE_PAGES,
487 IUCV_DBF_TRACE_NR_AREAS,
488 IUCV_DBF_TRACE_LEN);
489
490 if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
491 (iucv_dbf_trace == NULL)) {
492 iucv_unregister_dbf_views();
493 return -ENOMEM;
494 }
495 debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
496 debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
497
498 debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
499 debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
500
501 debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
502 debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
503
504 return 0;
505 }
506
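/*
 * The views registered above can typically be read at runtime through
 * the s390 debug feature in debugfs (the path is an assumption and
 * depends on where debugfs is mounted), e.g.:
 *
 *	cat /sys/kernel/debug/s390dbf/iucv_trace/hex_ascii
 *	echo 5 > /sys/kernel/debug/s390dbf/iucv_trace/level
 */
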
507 /*
508 * Callback-wrappers, called from lowlevel iucv layer.
509 */
510
511 static void netiucv_callback_rx(struct iucv_path *path,
512 struct iucv_message *msg)
513 {
514 struct iucv_connection *conn = path->private;
515 struct iucv_event ev;
516
517 ev.conn = conn;
518 ev.data = msg;
519 fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
520 }
521
522 static void netiucv_callback_txdone(struct iucv_path *path,
523 struct iucv_message *msg)
524 {
525 struct iucv_connection *conn = path->private;
526 struct iucv_event ev;
527
528 ev.conn = conn;
529 ev.data = msg;
530 fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
531 }
532
533 static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
534 {
535 struct iucv_connection *conn = path->private;
536
537 fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
538 }
539
540 static int netiucv_callback_connreq(struct iucv_path *path,
541 u8 ipvmid[8], u8 ipuser[16])
542 {
543 struct iucv_connection *conn = path->private;
544 struct iucv_event ev;
545 int rc;
546
547 if (memcmp(iucvMagic, ipuser, sizeof(iucvMagic)))
548 /* ipuser must match iucvMagic. */
549 return -EINVAL;
550 rc = -EINVAL;
551 read_lock_bh(&iucv_connection_rwlock);
552 list_for_each_entry(conn, &iucv_connection_list, list) {
553 if (strncmp(ipvmid, conn->userid, 8))
554 continue;
555 /* Found a matching connection for this path. */
556 conn->path = path;
557 ev.conn = conn;
558 ev.data = path;
559 fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
560 rc = 0;
561 }
562 read_unlock_bh(&iucv_connection_rwlock);
563 return rc;
564 }
565
566 static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
567 {
568 struct iucv_connection *conn = path->private;
569
570 fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
571 }
572
573 static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
574 {
575 struct iucv_connection *conn = path->private;
576
577 fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
578 }
579
580 static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
581 {
582 struct iucv_connection *conn = path->private;
583
584 fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
585 }
586
587 /**
588 * Dummy NOP action for all statemachines
589 */
590 static void fsm_action_nop(fsm_instance *fi, int event, void *arg)
591 {
592 }
593
594 /*
595 * Actions of the connection statemachine
596 */
597
598 /**
599 * netiucv_unpack_skb
600 * @conn: The connection where this skb has been received.
601 * @pskb: The received skb.
602 *
603 * Unpack a just received skb and hand it over to upper layers.
604 * Helper function for conn_action_rx.
605 */
606 static void netiucv_unpack_skb(struct iucv_connection *conn,
607 struct sk_buff *pskb)
608 {
609 struct net_device *dev = conn->netdev;
610 struct netiucv_priv *privptr = netdev_priv(dev);
611 u16 offset = 0;
612
613 skb_put(pskb, NETIUCV_HDRLEN);
614 pskb->dev = dev;
615 pskb->ip_summed = CHECKSUM_NONE;
616 pskb->protocol = ntohs(ETH_P_IP);
617
618 while (1) {
619 struct sk_buff *skb;
620 struct ll_header *header = (struct ll_header *) pskb->data;
621
622 if (!header->next)
623 break;
624
625 skb_pull(pskb, NETIUCV_HDRLEN);
626 header->next -= offset;
627 offset += header->next;
628 header->next -= NETIUCV_HDRLEN;
629 if (skb_tailroom(pskb) < header->next) {
630 PRINT_WARN("%s: Illegal next field in iucv header: "
631 "%d > %d\n",
632 dev->name, header->next, skb_tailroom(pskb));
633 IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
634 header->next, skb_tailroom(pskb));
635 return;
636 }
637 skb_put(pskb, header->next);
638 skb_reset_mac_header(pskb);
639 skb = dev_alloc_skb(pskb->len);
640 if (!skb) {
641 PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n",
642 dev->name);
643 IUCV_DBF_TEXT(data, 2,
644 "Out of memory in netiucv_unpack_skb\n");
645 privptr->stats.rx_dropped++;
646 return;
647 }
648 memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
649 skb_reset_mac_header(skb);
650 skb->dev = pskb->dev;
651 skb->protocol = pskb->protocol;
652 pskb->ip_summed = CHECKSUM_UNNECESSARY;
653 /*
654 * Since receiving is always initiated from a tasklet (in iucv.c),
655 * we must use netif_rx_ni() instead of netif_rx()
656 */
657 netif_rx_ni(skb);
658 dev->last_rx = jiffies;
659 privptr->stats.rx_packets++;
660 privptr->stats.rx_bytes += skb->len;
661 skb_pull(pskb, header->next);
662 skb_put(pskb, NETIUCV_HDRLEN);
663 }
664 }
665
666 static void conn_action_rx(fsm_instance *fi, int event, void *arg)
667 {
668 struct iucv_event *ev = arg;
669 struct iucv_connection *conn = ev->conn;
670 struct iucv_message *msg = ev->data;
671 struct netiucv_priv *privptr = netdev_priv(conn->netdev);
672 int rc;
673
674 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
675
676 if (!conn->netdev) {
677 iucv_message_reject(conn->path, msg);
678 PRINT_WARN("Received data for unlinked connection\n");
679 IUCV_DBF_TEXT(data, 2,
680 "Received data for unlinked connection\n");
681 return;
682 }
683 if (msg->length > conn->max_buffsize) {
684 iucv_message_reject(conn->path, msg);
685 privptr->stats.rx_dropped++;
686 PRINT_WARN("msglen %d > max_buffsize %d\n",
687 msg->length, conn->max_buffsize);
688 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
689 msg->length, conn->max_buffsize);
690 return;
691 }
692 conn->rx_buff->data = conn->rx_buff->head;
693 skb_reset_tail_pointer(conn->rx_buff);
694 conn->rx_buff->len = 0;
695 rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
696 msg->length, NULL);
697 if (rc || msg->length < 5) {
698 privptr->stats.rx_errors++;
699 PRINT_WARN("iucv_receive returned %08x\n", rc);
700 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
701 return;
702 }
703 netiucv_unpack_skb(conn, conn->rx_buff);
704 }
705
706 static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
707 {
708 struct iucv_event *ev = arg;
709 struct iucv_connection *conn = ev->conn;
710 struct iucv_message *msg = ev->data;
711 struct iucv_message txmsg;
712 struct netiucv_priv *privptr = NULL;
713 u32 single_flag = msg->tag;
714 u32 txbytes = 0;
715 u32 txpackets = 0;
716 u32 stat_maxcq = 0;
717 struct sk_buff *skb;
718 unsigned long saveflags;
719 struct ll_header header;
720 int rc;
721
722 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
723
724 if (conn && conn->netdev)
725 privptr = netdev_priv(conn->netdev);
726 conn->prof.tx_pending--;
727 if (single_flag) {
728 if ((skb = skb_dequeue(&conn->commit_queue))) {
729 atomic_dec(&skb->users);
730 dev_kfree_skb_any(skb);
731 if (privptr) {
732 privptr->stats.tx_packets++;
733 privptr->stats.tx_bytes +=
734 (skb->len - NETIUCV_HDRLEN
735 - NETIUCV_HDRLEN);
736 }
737 }
738 }
739 conn->tx_buff->data = conn->tx_buff->head;
740 skb_reset_tail_pointer(conn->tx_buff);
741 conn->tx_buff->len = 0;
742 spin_lock_irqsave(&conn->collect_lock, saveflags);
743 while ((skb = skb_dequeue(&conn->collect_queue))) {
744 header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
745 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
746 NETIUCV_HDRLEN);
747 memcpy(skb_put(conn->tx_buff, skb->len), skb->data, skb->len);
748 txbytes += skb->len;
749 txpackets++;
750 stat_maxcq++;
751 atomic_dec(&skb->users);
752 dev_kfree_skb_any(skb);
753 }
754 if (conn->collect_len > conn->prof.maxmulti)
755 conn->prof.maxmulti = conn->collect_len;
756 conn->collect_len = 0;
757 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
758 if (conn->tx_buff->len == 0) {
759 fsm_newstate(fi, CONN_STATE_IDLE);
760 return;
761 }
762
763 header.next = 0;
764 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
765 conn->prof.send_stamp = xtime;
766 txmsg.class = 0;
767 txmsg.tag = 0;
768 rc = iucv_message_send(conn->path, &txmsg, 0, 0,
769 conn->tx_buff->data, conn->tx_buff->len);
770 conn->prof.doios_multi++;
771 conn->prof.txlen += conn->tx_buff->len;
772 conn->prof.tx_pending++;
773 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
774 conn->prof.tx_max_pending = conn->prof.tx_pending;
775 if (rc) {
776 conn->prof.tx_pending--;
777 fsm_newstate(fi, CONN_STATE_IDLE);
778 if (privptr)
779 privptr->stats.tx_errors += txpackets;
780 PRINT_WARN("iucv_send returned %08x\n", rc);
781 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
782 } else {
783 if (privptr) {
784 privptr->stats.tx_packets += txpackets;
785 privptr->stats.tx_bytes += txbytes;
786 }
787 if (stat_maxcq > conn->prof.maxcqueue)
788 conn->prof.maxcqueue = stat_maxcq;
789 }
790 }
791
792 static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
793 {
794 struct iucv_event *ev = arg;
795 struct iucv_connection *conn = ev->conn;
796 struct iucv_path *path = ev->data;
797 struct net_device *netdev = conn->netdev;
798 struct netiucv_priv *privptr = netdev_priv(netdev);
799 int rc;
800
801 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
802
803 conn->path = path;
804 path->msglim = NETIUCV_QUEUELEN_DEFAULT;
805 path->flags = 0;
806 rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
807 if (rc) {
808 PRINT_WARN("%s: IUCV accept failed with error %d\n",
809 netdev->name, rc);
810 IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
811 return;
812 }
813 fsm_newstate(fi, CONN_STATE_IDLE);
814 netdev->tx_queue_len = conn->path->msglim;
815 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
816 }
817
818 static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
819 {
820 struct iucv_event *ev = arg;
821 struct iucv_path *path = ev->data;
822
823 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
824 iucv_path_sever(path, NULL);
825 }
826
827 static void conn_action_connack(fsm_instance *fi, int event, void *arg)
828 {
829 struct iucv_connection *conn = arg;
830 struct net_device *netdev = conn->netdev;
831 struct netiucv_priv *privptr = netdev_priv(netdev);
832
833 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
834 fsm_deltimer(&conn->timer);
835 fsm_newstate(fi, CONN_STATE_IDLE);
836 netdev->tx_queue_len = conn->path->msglim;
837 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
838 }
839
840 static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
841 {
842 struct iucv_connection *conn = arg;
843
844 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
845 fsm_deltimer(&conn->timer);
846 iucv_path_sever(conn->path, NULL);
847 fsm_newstate(fi, CONN_STATE_STARTWAIT);
848 }
849
850 static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
851 {
852 struct iucv_connection *conn = arg;
853 struct net_device *netdev = conn->netdev;
854 struct netiucv_priv *privptr = netdev_priv(netdev);
855
856 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
857
858 fsm_deltimer(&conn->timer);
859 iucv_path_sever(conn->path, NULL);
860 PRINT_INFO("%s: Remote dropped connection\n", netdev->name);
861 IUCV_DBF_TEXT(data, 2,
862 "conn_action_connsever: Remote dropped connection\n");
863 fsm_newstate(fi, CONN_STATE_STARTWAIT);
864 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
865 }
866
867 static void conn_action_start(fsm_instance *fi, int event, void *arg)
868 {
869 struct iucv_connection *conn = arg;
870 int rc;
871
872 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
873
874 fsm_newstate(fi, CONN_STATE_STARTWAIT);
875 PRINT_DEBUG("%s('%s'): connecting ...\n",
876 conn->netdev->name, conn->userid);
877
878 /*
879 * We must set the state before calling iucv_connect because the
880 * callback handler could be called at any point after the connection
881 * request is sent
882 */
883
884 fsm_newstate(fi, CONN_STATE_SETUPWAIT);
885 conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
886 rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
887 NULL, iucvMagic, conn);
888 switch (rc) {
889 case 0:
890 conn->netdev->tx_queue_len = conn->path->msglim;
891 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
892 CONN_EVENT_TIMER, conn);
893 return;
894 case 11:
895 PRINT_INFO("%s: User %s is currently not available.\n",
896 conn->netdev->name,
897 netiucv_printname(conn->userid));
898 fsm_newstate(fi, CONN_STATE_STARTWAIT);
899 break;
900 case 12:
901 PRINT_INFO("%s: User %s is currently not ready.\n",
902 conn->netdev->name,
903 netiucv_printname(conn->userid));
904 fsm_newstate(fi, CONN_STATE_STARTWAIT);
905 break;
906 case 13:
907 PRINT_WARN("%s: Too many IUCV connections.\n",
908 conn->netdev->name);
909 fsm_newstate(fi, CONN_STATE_CONNERR);
910 break;
911 case 14:
912 PRINT_WARN("%s: User %s has too many IUCV connections.\n",
913 conn->netdev->name,
914 netiucv_printname(conn->userid));
915 fsm_newstate(fi, CONN_STATE_CONNERR);
916 break;
917 case 15:
918 PRINT_WARN("%s: No IUCV authorization in CP directory.\n",
919 conn->netdev->name);
920 fsm_newstate(fi, CONN_STATE_CONNERR);
921 break;
922 default:
923 PRINT_WARN("%s: iucv_connect returned error %d\n",
924 conn->netdev->name, rc);
925 fsm_newstate(fi, CONN_STATE_CONNERR);
926 break;
927 }
928 IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
929 kfree(conn->path);
930 conn->path = NULL;
931 }
932
933 static void netiucv_purge_skb_queue(struct sk_buff_head *q)
934 {
935 struct sk_buff *skb;
936
937 while ((skb = skb_dequeue(q))) {
938 atomic_dec(&skb->users);
939 dev_kfree_skb_any(skb);
940 }
941 }
942
943 static void conn_action_stop(fsm_instance *fi, int event, void *arg)
944 {
945 struct iucv_event *ev = arg;
946 struct iucv_connection *conn = ev->conn;
947 struct net_device *netdev = conn->netdev;
948 struct netiucv_priv *privptr = netdev_priv(netdev);
949
950 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
951
952 fsm_deltimer(&conn->timer);
953 fsm_newstate(fi, CONN_STATE_STOPPED);
954 netiucv_purge_skb_queue(&conn->collect_queue);
955 if (conn->path) {
956 IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
957 iucv_path_sever(conn->path, iucvMagic);
958 kfree(conn->path);
959 conn->path = NULL;
960 }
961 netiucv_purge_skb_queue(&conn->commit_queue);
962 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
963 }
964
965 static void conn_action_inval(fsm_instance *fi, int event, void *arg)
966 {
967 struct iucv_connection *conn = arg;
968 struct net_device *netdev = conn->netdev;
969
970 PRINT_WARN("%s: Cannot connect without username\n", netdev->name);
971 IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n");
972 }
973
974 static const fsm_node conn_fsm[] = {
975 { CONN_STATE_INVALID, CONN_EVENT_START, conn_action_inval },
976 { CONN_STATE_STOPPED, CONN_EVENT_START, conn_action_start },
977
978 { CONN_STATE_STOPPED, CONN_EVENT_STOP, conn_action_stop },
979 { CONN_STATE_STARTWAIT, CONN_EVENT_STOP, conn_action_stop },
980 { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP, conn_action_stop },
981 { CONN_STATE_IDLE, CONN_EVENT_STOP, conn_action_stop },
982 { CONN_STATE_TX, CONN_EVENT_STOP, conn_action_stop },
983 { CONN_STATE_REGERR, CONN_EVENT_STOP, conn_action_stop },
984 { CONN_STATE_CONNERR, CONN_EVENT_STOP, conn_action_stop },
985
986 { CONN_STATE_STOPPED, CONN_EVENT_CONN_REQ, conn_action_connreject },
987 { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
988 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
989 { CONN_STATE_IDLE, CONN_EVENT_CONN_REQ, conn_action_connreject },
990 { CONN_STATE_TX, CONN_EVENT_CONN_REQ, conn_action_connreject },
991
992 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack },
993 { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER, conn_action_conntimsev },
994
995 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever },
996 { CONN_STATE_IDLE, CONN_EVENT_CONN_REJ, conn_action_connsever },
997 { CONN_STATE_TX, CONN_EVENT_CONN_REJ, conn_action_connsever },
998
999 { CONN_STATE_IDLE, CONN_EVENT_RX, conn_action_rx },
1000 { CONN_STATE_TX, CONN_EVENT_RX, conn_action_rx },
1001
1002 { CONN_STATE_TX, CONN_EVENT_TXDONE, conn_action_txdone },
1003 { CONN_STATE_IDLE, CONN_EVENT_TXDONE, conn_action_txdone },
1004 };
1005
1006 static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
1007
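/*
 * Reading example (illustrative only): when an IUCV message arrives,
 * netiucv_callback_rx() posts
 *
 *	fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
 *
 * If the connection is in CONN_STATE_IDLE, this matches the
 * { CONN_STATE_IDLE, CONN_EVENT_RX, conn_action_rx } entry above and
 * therefore invokes conn_action_rx(fi, CONN_EVENT_RX, &ev).
 */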
1008
1009 /*
1010 * Actions for interface - statemachine.
1011 */
1012
1013 /**
1014 * dev_action_start
1015 * @fi: An instance of an interface statemachine.
1016 * @event: The event, just happened.
1017 * @arg: Generic pointer, casted from struct net_device * upon call.
1018 *
1019 * Startup connection by sending CONN_EVENT_START to it.
1020 */
1021 static void dev_action_start(fsm_instance *fi, int event, void *arg)
1022 {
1023 struct net_device *dev = arg;
1024 struct netiucv_priv *privptr = netdev_priv(dev);
1025
1026 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1027
1028 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1029 fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1030 }
1031
1032 /**
1033 * Shutdown connection by sending CONN_EVENT_STOP to it.
1034 *
1035 * @param fi An instance of an interface statemachine.
1036 * @param event The event, just happened.
1037 * @param arg Generic pointer, casted from struct net_device * upon call.
1038 */
1039 static void
1040 dev_action_stop(fsm_instance *fi, int event, void *arg)
1041 {
1042 struct net_device *dev = arg;
1043 struct netiucv_priv *privptr = netdev_priv(dev);
1044 struct iucv_event ev;
1045
1046 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1047
1048 ev.conn = privptr->conn;
1049
1050 fsm_newstate(fi, DEV_STATE_STOPWAIT);
1051 fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1052 }
1053
1054 /**
1055 * Called from connection statemachine
1056 * when a connection is up and running.
1057 *
1058 * @param fi An instance of an interface statemachine.
1059 * @param event The event, just happened.
1060 * @param arg Generic pointer, casted from struct net_device * upon call.
1061 */
1062 static void
1063 dev_action_connup(fsm_instance *fi, int event, void *arg)
1064 {
1065 struct net_device *dev = arg;
1066 struct netiucv_priv *privptr = netdev_priv(dev);
1067
1068 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1069
1070 switch (fsm_getstate(fi)) {
1071 case DEV_STATE_STARTWAIT:
1072 fsm_newstate(fi, DEV_STATE_RUNNING);
1073 PRINT_INFO("%s: connected with remote side %s\n",
1074 dev->name, privptr->conn->userid);
1075 IUCV_DBF_TEXT(setup, 3,
1076 "connection is up and running\n");
1077 break;
1078 case DEV_STATE_STOPWAIT:
1079 PRINT_INFO(
1080 "%s: got connection UP event during shutdown!\n",
1081 dev->name);
1082 IUCV_DBF_TEXT(data, 2,
1083 "dev_action_connup: in DEV_STATE_STOPWAIT\n");
1084 break;
1085 }
1086 }
1087
1088 /**
1089 * Called from connection statemachine
1090 * when a connection has been shutdown.
1091 *
1092 * @param fi An instance of an interface statemachine.
1093 * @param event The event, just happened.
1094 * @param arg Generic pointer, casted from struct net_device * upon call.
1095 */
1096 static void
1097 dev_action_conndown(fsm_instance *fi, int event, void *arg)
1098 {
1099 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1100
1101 switch (fsm_getstate(fi)) {
1102 case DEV_STATE_RUNNING:
1103 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1104 break;
1105 case DEV_STATE_STOPWAIT:
1106 fsm_newstate(fi, DEV_STATE_STOPPED);
1107 IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1108 break;
1109 }
1110 }
1111
1112 static const fsm_node dev_fsm[] = {
1113 { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
1114
1115 { DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start },
1116 { DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown },
1117
1118 { DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop },
1119 { DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup },
1120
1121 { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
1122 { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown },
1123 { DEV_STATE_RUNNING, DEV_EVENT_CONUP, fsm_action_nop },
1124 };
1125
1126 static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1127
1128 /**
1129 * Transmit a packet.
1130 * This is a helper function for netiucv_tx().
1131 *
1132 * @param conn Connection to be used for sending.
1133 * @param skb Pointer to struct sk_buff of packet to send.
1134 * The linklevel header has already been set up
1135 * by netiucv_tx().
1136 *
1137 * @return 0 on success, -ERRNO on failure.
1138 */
1139 static int netiucv_transmit_skb(struct iucv_connection *conn,
1140 struct sk_buff *skb)
1141 {
1142 struct iucv_message msg;
1143 unsigned long saveflags;
1144 struct ll_header header;
1145 int rc;
1146
1147 if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1148 int l = skb->len + NETIUCV_HDRLEN;
1149
1150 spin_lock_irqsave(&conn->collect_lock, saveflags);
1151 if (conn->collect_len + l >
1152 (conn->max_buffsize - NETIUCV_HDRLEN)) {
1153 rc = -EBUSY;
1154 IUCV_DBF_TEXT(data, 2,
1155 "EBUSY from netiucv_transmit_skb\n");
1156 } else {
1157 atomic_inc(&skb->users);
1158 skb_queue_tail(&conn->collect_queue, skb);
1159 conn->collect_len += l;
1160 rc = 0;
1161 }
1162 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1163 } else {
1164 struct sk_buff *nskb = skb;
1165 /**
1166 * Copy the skb to a new allocated skb in lowmem only if the
1167 * data is located above 2G in memory or tailroom is < 2.
1168 */
1169 unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1170 NETIUCV_HDRLEN)) >> 31;
1171 int copied = 0;
1172 if (hi || (skb_tailroom(skb) < 2)) {
1173 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1174 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1175 if (!nskb) {
1176 PRINT_WARN("%s: Could not allocate tx_skb\n",
1177 conn->netdev->name);
1178 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1179 rc = -ENOMEM;
1180 return rc;
1181 } else {
1182 skb_reserve(nskb, NETIUCV_HDRLEN);
1183 memcpy(skb_put(nskb, skb->len),
1184 skb->data, skb->len);
1185 }
1186 copied = 1;
1187 }
1188 /**
1189 * skb now is below 2G and has enough room. Add headers.
1190 */
1191 header.next = nskb->len + NETIUCV_HDRLEN;
1192 memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1193 header.next = 0;
1194 memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1195
1196 fsm_newstate(conn->fsm, CONN_STATE_TX);
1197 conn->prof.send_stamp = xtime;
1198
1199 msg.tag = 1;
1200 msg.class = 0;
1201 rc = iucv_message_send(conn->path, &msg, 0, 0,
1202 nskb->data, nskb->len);
1203 conn->prof.doios_single++;
1204 conn->prof.txlen += skb->len;
1205 conn->prof.tx_pending++;
1206 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1207 conn->prof.tx_max_pending = conn->prof.tx_pending;
1208 if (rc) {
1209 struct netiucv_priv *privptr;
1210 fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1211 conn->prof.tx_pending--;
1212 privptr = netdev_priv(conn->netdev);
1213 if (privptr)
1214 privptr->stats.tx_errors++;
1215 if (copied)
1216 dev_kfree_skb(nskb);
1217 else {
1218 /**
1219 * Remove our headers. They get added
1220 * again on retransmit.
1221 */
1222 skb_pull(skb, NETIUCV_HDRLEN);
1223 skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1224 }
1225 PRINT_WARN("iucv_send returned %08x\n", rc);
1226 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1227 } else {
1228 if (copied)
1229 dev_kfree_skb(skb);
1230 atomic_inc(&nskb->users);
1231 skb_queue_tail(&conn->commit_queue, nskb);
1232 }
1233 }
1234
1235 return rc;
1236 }
1237
1238 /*
1239 * Interface API for upper network layers
1240 */
1241
1242 /**
1243 * Open an interface.
1244 * Called from generic network layer when ifconfig up is run.
1245 *
1246 * @param dev Pointer to interface struct.
1247 *
1248 * @return 0 on success, -ERRNO on failure. (Never fails.)
1249 */
1250 static int netiucv_open(struct net_device *dev)
1251 {
1252 struct netiucv_priv *priv = netdev_priv(dev);
1253
1254 fsm_event(priv->fsm, DEV_EVENT_START, dev);
1255 return 0;
1256 }
1257
1258 /**
1259 * Close an interface.
1260 * Called from generic network layer when ifconfig down is run.
1261 *
1262 * @param dev Pointer to interface struct.
1263 *
1264 * @return 0 on success, -ERRNO on failure. (Never fails.)
1265 */
1266 static int netiucv_close(struct net_device *dev)
1267 {
1268 struct netiucv_priv *priv = netdev_priv(dev);
1269
1270 fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1271 return 0;
1272 }
1273
1274 /**
1275 * Start transmission of a packet.
1276 * Called from generic network device layer.
1277 *
1278 * @param skb Pointer to buffer containing the packet.
1279 * @param dev Pointer to interface struct.
1280 *
1281 * @return 0 if packet consumed, !0 if packet rejected.
1282 * Note: If we return !0, then the packet is free'd by
1283 * the generic network layer.
1284 */
1285 static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1286 {
1287 struct netiucv_priv *privptr = netdev_priv(dev);
1288 int rc;
1289
1290 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1291 /**
1292 * Some sanity checks ...
1293 */
1294 if (skb == NULL) {
1295 PRINT_WARN("%s: NULL sk_buff passed\n", dev->name);
1296 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1297 privptr->stats.tx_dropped++;
1298 return 0;
1299 }
1300 if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1301 PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n",
1302 dev->name, NETIUCV_HDRLEN);
1303 IUCV_DBF_TEXT(data, 2,
1304 "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1305 dev_kfree_skb(skb);
1306 privptr->stats.tx_dropped++;
1307 return 0;
1308 }
1309
1310 /**
1311 * If connection is not running, try to restart it
1312 * and throw away packet.
1313 */
1314 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1315 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
1316 dev_kfree_skb(skb);
1317 privptr->stats.tx_dropped++;
1318 privptr->stats.tx_errors++;
1319 privptr->stats.tx_carrier_errors++;
1320 return 0;
1321 }
1322
1323 if (netiucv_test_and_set_busy(dev)) {
1324 IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1325 return -EBUSY;
1326 }
1327 dev->trans_start = jiffies;
1328 rc = netiucv_transmit_skb(privptr->conn, skb) != 0;
1329 netiucv_clear_busy(dev);
1330 return rc;
1331 }
1332
1333 /**
1334 * netiucv_stats
1335 * @dev: Pointer to interface struct.
1336 *
1337 * Returns interface statistics of a device.
1338 *
1339 * Returns pointer to stats struct of this interface.
1340 */
1341 static struct net_device_stats *netiucv_stats (struct net_device * dev)
1342 {
1343 struct netiucv_priv *priv = netdev_priv(dev);
1344
1345 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1346 return &priv->stats;
1347 }
1348
1349 /**
1350 * netiucv_change_mtu
1351 * @dev: Pointer to interface struct.
1352 * @new_mtu: The new MTU to use for this interface.
1353 *
1354 * Sets MTU of an interface.
1355 *
1356 * Returns 0 on success, -EINVAL if MTU is out of valid range.
1357 * (valid range is 576 .. NETIUCV_MTU_MAX).
1358 */
1359 static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1360 {
1361 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1362 if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1363 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1364 return -EINVAL;
1365 }
1366 dev->mtu = new_mtu;
1367 return 0;
1368 }
1369
1370 /*
1371 * attributes in sysfs
1372 */
1373
1374 static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1375 char *buf)
1376 {
1377 struct netiucv_priv *priv = dev->driver_data;
1378
1379 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1380 return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
1381 }
1382
1383 static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1384 const char *buf, size_t count)
1385 {
1386 struct netiucv_priv *priv = dev->driver_data;
1387 struct net_device *ndev = priv->conn->netdev;
1388 char *p;
1389 char *tmp;
1390 char username[9];
1391 int i;
1392 struct iucv_connection *cp;
1393
1394 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1395 if (count > 9) {
1396 PRINT_WARN("netiucv: username too long (%d)!\n", (int) count);
1397 IUCV_DBF_TEXT_(setup, 2,
1398 "%d is length of username\n", (int) count);
1399 return -EINVAL;
1400 }
1401
1402 tmp = strsep((char **) &buf, "\n");
1403 for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
1404 if (isalnum(*p) || (*p == '$')) {
1405 username[i]= toupper(*p);
1406 continue;
1407 }
1408 if (*p == '\n') {
1409 /* trailing lf, grr */
1410 break;
1411 }
1412 PRINT_WARN("netiucv: Invalid char %c in username!\n", *p);
1413 IUCV_DBF_TEXT_(setup, 2,
1414 "username: invalid character %c\n", *p);
1415 return -EINVAL;
1416 }
1417 while (i < 8)
1418 username[i++] = ' ';
1419 username[8] = '\0';
1420
1421 if (memcmp(username, priv->conn->userid, 9) &&
1422 (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1423 /* username changed while the interface is active. */
1424 PRINT_WARN("netiucv: device %s active, connected to %s\n",
1425 dev->bus_id, priv->conn->userid);
1426 PRINT_WARN("netiucv: user cannot be updated\n");
1427 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1428 return -EBUSY;
1429 }
1430 read_lock_bh(&iucv_connection_rwlock);
1431 list_for_each_entry(cp, &iucv_connection_list, list) {
1432 if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
1433 read_unlock_bh(&iucv_connection_rwlock);
1434 PRINT_WARN("netiucv: Connection to %s already "
1435 "exists\n", username);
1436 return -EEXIST;
1437 }
1438 }
1439 read_unlock_bh(&iucv_connection_rwlock);
1440 memcpy(priv->conn->userid, username, 9);
1441 return count;
1442 }
1443
1444 static DEVICE_ATTR(user, 0644, user_show, user_write);
1445
1446 static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1447 char *buf)
1448 {
struct netiucv_priv *priv = dev->driver_data;
1449
1450 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1451 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1452 }
1453
1454 static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1455 const char *buf, size_t count)
1456 {
1457 struct netiucv_priv *priv = dev->driver_data;
1458 struct net_device *ndev = priv->conn->netdev;
1459 char *e;
1460 int bs1;
1461
1462 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1463 if (count >= 39)
1464 return -EINVAL;
1465
1466 bs1 = simple_strtoul(buf, &e, 0);
1467
1468 if (e && (!isspace(*e))) {
1469 PRINT_WARN("netiucv: Invalid character in buffer!\n");
1470 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
1471 return -EINVAL;
1472 }
1473 if (bs1 > NETIUCV_BUFSIZE_MAX) {
1474 PRINT_WARN("netiucv: Given buffer size %d too large.\n",
1475 bs1);
1476 IUCV_DBF_TEXT_(setup, 2,
1477 "buffer_write: buffer size %d too large\n",
1478 bs1);
1479 return -EINVAL;
1480 }
1481 if ((ndev->flags & IFF_RUNNING) &&
1482 (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1483 PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1484 bs1);
1485 IUCV_DBF_TEXT_(setup, 2,
1486 "buffer_write: buffer size %d too small\n",
1487 bs1);
1488 return -EINVAL;
1489 }
1490 if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1491 PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1492 bs1);
1493 IUCV_DBF_TEXT_(setup, 2,
1494 "buffer_write: buffer size %d too small\n",
1495 bs1);
1496 return -EINVAL;
1497 }
1498
1499 priv->conn->max_buffsize = bs1;
1500 if (!(ndev->flags & IFF_RUNNING))
1501 ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1502
1503 return count;
1504
1505 }
1506
1507 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1508
1509 static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1510 char *buf)
1511 {
1512 struct netiucv_priv *priv = dev->driver_data;
1513
1514 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1515 return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1516 }
1517
1518 static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1519
1520 static ssize_t conn_fsm_show (struct device *dev,
1521 struct device_attribute *attr, char *buf)
1522 {
1523 struct netiucv_priv *priv = dev->driver_data;
1524
1525 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1526 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1527 }
1528
1529 static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1530
1531 static ssize_t maxmulti_show (struct device *dev,
1532 struct device_attribute *attr, char *buf)
1533 {
1534 struct netiucv_priv *priv = dev->driver_data;
1535
1536 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1537 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1538 }
1539
1540 static ssize_t maxmulti_write (struct device *dev,
1541 struct device_attribute *attr,
1542 const char *buf, size_t count)
1543 {
1544 struct netiucv_priv *priv = dev->driver_data;
1545
1546 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1547 priv->conn->prof.maxmulti = 0;
1548 return count;
1549 }
1550
1551 static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1552
1553 static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1554 char *buf)
1555 {
1556 struct netiucv_priv *priv = dev->driver_data;
1557
1558 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1559 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1560 }
1561
1562 static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1563 const char *buf, size_t count)
1564 {
1565 struct netiucv_priv *priv = dev->driver_data;
1566
1567 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1568 priv->conn->prof.maxcqueue = 0;
1569 return count;
1570 }
1571
1572 static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1573
1574 static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1575 char *buf)
1576 {
1577 struct netiucv_priv *priv = dev->driver_data;
1578
1579 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1580 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1581 }
1582
1583 static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1584 const char *buf, size_t count)
1585 {
1586 struct netiucv_priv *priv = dev->driver_data;
1587
1588 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1589 priv->conn->prof.doios_single = 0;
1590 return count;
1591 }
1592
1593 static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1594
1595 static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1596 char *buf)
1597 {
1598 struct netiucv_priv *priv = dev->driver_data;
1599
1600 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1601 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1602 }
1603
1604 static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1605 const char *buf, size_t count)
1606 {
1607 struct netiucv_priv *priv = dev->driver_data;
1608
1609 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1610 priv->conn->prof.doios_multi = 0;
1611 return count;
1612 }
1613
1614 static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1615
1616 static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1617 char *buf)
1618 {
1619 struct netiucv_priv *priv = dev->driver_data;
1620
1621 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1622 return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1623 }
1624
1625 static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1626 const char *buf, size_t count)
1627 {
1628 struct netiucv_priv *priv = dev->driver_data;
1629
1630 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1631 priv->conn->prof.txlen = 0;
1632 return count;
1633 }
1634
1635 static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1636
1637 static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1638 char *buf)
1639 {
1640 struct netiucv_priv *priv = dev->driver_data;
1641
1642 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1643 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1644 }
1645
1646 static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1647 const char *buf, size_t count)
1648 {
1649 struct netiucv_priv *priv = dev->driver_data;
1650
1651 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1652 priv->conn->prof.tx_time = 0;
1653 return count;
1654 }
1655
1656 static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1657
1658 static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1659 char *buf)
1660 {
1661 struct netiucv_priv *priv = dev->driver_data;
1662
1663 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1664 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1665 }
1666
1667 static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1668 const char *buf, size_t count)
1669 {
1670 struct netiucv_priv *priv = dev->driver_data;
1671
1672 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1673 priv->conn->prof.tx_pending = 0;
1674 return count;
1675 }
1676
1677 static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1678
1679 static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1680 char *buf)
1681 {
1682 struct netiucv_priv *priv = dev->driver_data;
1683
1684 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1685 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1686 }
1687
1688 static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1689 const char *buf, size_t count)
1690 {
1691 struct netiucv_priv *priv = dev->driver_data;
1692
1693 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1694 priv->conn->prof.tx_max_pending = 0;
1695 return count;
1696 }
1697
1698 static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1699
1700 static struct attribute *netiucv_attrs[] = {
1701 &dev_attr_buffer.attr,
1702 &dev_attr_user.attr,
1703 NULL,
1704 };
1705
1706 static struct attribute_group netiucv_attr_group = {
1707 .attrs = netiucv_attrs,
1708 };
1709
1710 static struct attribute *netiucv_stat_attrs[] = {
1711 &dev_attr_device_fsm_state.attr,
1712 &dev_attr_connection_fsm_state.attr,
1713 &dev_attr_max_tx_buffer_used.attr,
1714 &dev_attr_max_chained_skbs.attr,
1715 &dev_attr_tx_single_write_ops.attr,
1716 &dev_attr_tx_multi_write_ops.attr,
1717 &dev_attr_netto_bytes.attr,
1718 &dev_attr_max_tx_io_time.attr,
1719 &dev_attr_tx_pending.attr,
1720 &dev_attr_tx_max_pending.attr,
1721 NULL,
1722 };
1723
1724 static struct attribute_group netiucv_stat_attr_group = {
1725 .name = "stats",
1726 .attrs = netiucv_stat_attrs,
1727 };
1728
1729 static inline int netiucv_add_files(struct device *dev)
1730 {
1731 int ret;
1732
1733 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1734 ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
1735 if (ret)
1736 return ret;
1737 ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
1738 if (ret)
1739 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1740 return ret;
1741 }
1742
1743 static inline void netiucv_remove_files(struct device *dev)
1744 {
1745 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1746 sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
1747 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1748 }
1749
1750 static int netiucv_register_device(struct net_device *ndev)
1751 {
1752 struct netiucv_priv *priv = netdev_priv(ndev);
1753 struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1754 int ret;
1755
1756
1757 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1758
1759 if (dev) {
1760 snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name);
1761 dev->bus = &iucv_bus;
1762 dev->parent = iucv_root;
1763 /*
1764 * The release function could be called after the
1765 * module has been unloaded. Its _only_ task is to
1766 * free the struct. Therefore, we specify kfree()
1767 * directly here. (Probably a little bit obfuscating
1768 * but legitimate ...).
1769 */
1770 dev->release = (void (*)(struct device *))kfree;
1771 dev->driver = &netiucv_driver;
1772 } else
1773 return -ENOMEM;
1774
1775 ret = device_register(dev);
1776
1777 if (ret)
1778 return ret;
1779 ret = netiucv_add_files(dev);
1780 if (ret)
1781 goto out_unreg;
1782 priv->dev = dev;
1783 dev->driver_data = priv;
1784 return 0;
1785
1786 out_unreg:
1787 device_unregister(dev);
1788 return ret;
1789 }
1790
1791 static void netiucv_unregister_device(struct device *dev)
1792 {
1793 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1794 netiucv_remove_files(dev);
1795 device_unregister(dev);
1796 }
1797
1798 /**
1799 * Allocate and initialize a new connection structure.
1800 * Add it to the list of netiucv connections.
1801 */
1802 static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1803 char *username)
1804 {
1805 struct iucv_connection *conn;
1806
1807 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1808 if (!conn)
1809 goto out;
1810 skb_queue_head_init(&conn->collect_queue);
1811 skb_queue_head_init(&conn->commit_queue);
1812 spin_lock_init(&conn->collect_lock);
1813 conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1814 conn->netdev = dev;
1815
1816 conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1817 if (!conn->rx_buff)
1818 goto out_conn;
1819 conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1820 if (!conn->tx_buff)
1821 goto out_rx;
1822 conn->fsm = init_fsm("netiucvconn", conn_state_names,
1823 conn_event_names, NR_CONN_STATES,
1824 NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1825 GFP_KERNEL);
1826 if (!conn->fsm)
1827 goto out_tx;
1828
1829 fsm_settimer(conn->fsm, &conn->timer);
1830 fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1831
1832 if (username) {
1833 memcpy(conn->userid, username, 9);
1834 fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1835 }
1836
1837 write_lock_bh(&iucv_connection_rwlock);
1838 list_add_tail(&conn->list, &iucv_connection_list);
1839 write_unlock_bh(&iucv_connection_rwlock);
1840 return conn;
1841
1842 out_tx:
1843 kfree_skb(conn->tx_buff);
1844 out_rx:
1845 kfree_skb(conn->rx_buff);
1846 out_conn:
1847 kfree(conn);
1848 out:
1849 return NULL;
1850 }
1851
1852 /**
1853 * Release a connection structure and remove it from the
1854 * list of netiucv connections.
1855 */
1856 static void netiucv_remove_connection(struct iucv_connection *conn)
1857 {
1858 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1859 write_lock_bh(&iucv_connection_rwlock);
1860 list_del_init(&conn->list);
1861 write_unlock_bh(&iucv_connection_rwlock);
1862 if (conn->path) {
1863 iucv_path_sever(conn->path, iucvMagic);
1864 kfree(conn->path);
1865 conn->path = NULL;
1866 }
1867 fsm_deltimer(&conn->timer);
1868 kfree_fsm(conn->fsm);
1869 kfree_skb(conn->rx_buff);
1870 kfree_skb(conn->tx_buff);
1871 }
1872
1873 /**
1874 * Release everything of a net device.
1875 */
1876 static void netiucv_free_netdevice(struct net_device *dev)
1877 {
1878 struct netiucv_priv *privptr = netdev_priv(dev);
1879
1880 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1881
1882 if (!dev)
1883 return;
1884
1885 if (privptr) {
1886 if (privptr->conn)
1887 netiucv_remove_connection(privptr->conn);
1888 if (privptr->fsm)
1889 kfree_fsm(privptr->fsm);
1890 privptr->conn = NULL; privptr->fsm = NULL;
1891 /* privptr gets freed by free_netdev() */
1892 }
1893 free_netdev(dev);
1894 }
1895
1896 /**
1897 * Initialize a net device. (Called from kernel in alloc_netdev())
1898 */
1899 static void netiucv_setup_netdevice(struct net_device *dev)
1900 {
1901 dev->mtu = NETIUCV_MTU_DEFAULT;
1902 dev->hard_start_xmit = netiucv_tx;
1903 dev->open = netiucv_open;
1904 dev->stop = netiucv_close;
1905 dev->get_stats = netiucv_stats;
1906 dev->change_mtu = netiucv_change_mtu;
1907 dev->destructor = netiucv_free_netdevice;
1908 dev->hard_header_len = NETIUCV_HDRLEN;
1909 dev->addr_len = 0;
1910 dev->type = ARPHRD_SLIP;
1911 dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
1912 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1913 SET_MODULE_OWNER(dev);
1914 }
1915
1916 /**
1917 * Allocate and initialize everything of a net device.
1918 */
1919 static struct net_device *netiucv_init_netdevice(char *username)
1920 {
1921 struct netiucv_priv *privptr;
1922 struct net_device *dev;
1923
1924 dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
1925 netiucv_setup_netdevice);
1926 if (!dev)
1927 return NULL;
1928 if (dev_alloc_name(dev, dev->name) < 0)
1929 goto out_netdev;
1930
1931 privptr = netdev_priv(dev);
1932 privptr->fsm = init_fsm("netiucvdev", dev_state_names,
1933 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
1934 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
1935 if (!privptr->fsm)
1936 goto out_netdev;
1937
1938 privptr->conn = netiucv_new_connection(dev, username);
1939 if (!privptr->conn) {
1940 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
1941 goto out_fsm;
1942 }
1943 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
1944 return dev;
1945
1946 out_fsm:
1947 kfree_fsm(privptr->fsm);
1948 out_netdev:
1949 free_netdev(dev);
1950 return NULL;
1951 }
1952
1953 static ssize_t conn_write(struct device_driver *drv,
1954 const char *buf, size_t count)
1955 {
1956 const char *p;
1957 char username[9];
1958 int i, rc;
1959 struct net_device *dev;
1960 struct netiucv_priv *priv;
1961 struct iucv_connection *cp;
1962
1963 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1964 if (count>9) {
1965 PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
1966 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1967 return -EINVAL;
1968 }
1969
1970 for (i = 0, p = buf; i < 8 && *p; i++, p++) {
1971 if (isalnum(*p) || *p == '$') {
1972 username[i] = toupper(*p);
1973 continue;
1974 }
1975 if (*p == '\n')
1976 /* trailing lf, grr */
1977 break;
1978 PRINT_WARN("netiucv: Invalid character in username!\n");
1979 IUCV_DBF_TEXT_(setup, 2,
1980 "conn_write: invalid character %c\n", *p);
1981 return -EINVAL;
1982 }
1983 while (i < 8)
1984 username[i++] = ' ';
1985 username[8] = '\0';
1986
1987 read_lock_bh(&iucv_connection_rwlock);
1988 list_for_each_entry(cp, &iucv_connection_list, list) {
1989 if (!strncmp(username, cp->userid, 9)) {
1990 read_unlock_bh(&iucv_connection_rwlock);
1991 PRINT_WARN("netiucv: Connection to %s already "
1992 "exists\n", username);
1993 return -EEXIST;
1994 }
1995 }
1996 read_unlock_bh(&iucv_connection_rwlock);
1997
1998 dev = netiucv_init_netdevice(username);
1999 if (!dev) {
2000 PRINT_WARN("netiucv: Could not allocate network device "
2001 "structure for user '%s'\n",
2002 netiucv_printname(username));
2003 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
2004 return -ENODEV;
2005 }
2006
2007 rc = netiucv_register_device(dev);
2008 if (rc) {
2009 IUCV_DBF_TEXT_(setup, 2,
2010 "ret %d from netiucv_register_device\n", rc);
2011 goto out_free_ndev;
2012 }
2013
2014 /* sysfs magic */
2015 priv = netdev_priv(dev);
2016 SET_NETDEV_DEV(dev, priv->dev);
2017
2018 rc = register_netdev(dev);
2019 if (rc)
2020 goto out_unreg;
2021
2022 PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));
2023
2024 return count;
2025
2026 out_unreg:
2027 netiucv_unregister_device(priv->dev);
2028 out_free_ndev:
2029 PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
2030 IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
2031 netiucv_free_netdevice(dev);
2032 return rc;
2033 }
2034
2035 static DRIVER_ATTR(connection, 0200, NULL, conn_write);
2036
2037 static ssize_t remove_write (struct device_driver *drv,
2038 const char *buf, size_t count)
2039 {
2040 struct iucv_connection *cp;
2041 struct net_device *ndev;
2042 struct netiucv_priv *priv;
2043 struct device *dev;
2044 char name[IFNAMSIZ];
2045 const char *p;
2046 int i;
2047
2048 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2049
2050 if (count >= IFNAMSIZ)
2051 count = IFNAMSIZ - 1;
2052
2053 for (i = 0, p = buf; i < count && *p; i++, p++) {
2054 if (*p == '\n' || *p == ' ')
2055 /* trailing lf, grr */
2056 break;
2057 name[i] = *p;
2058 }
2059 name[i] = '\0';
2060
2061 read_lock_bh(&iucv_connection_rwlock);
2062 list_for_each_entry(cp, &iucv_connection_list, list) {
2063 ndev = cp->netdev;
2064 priv = netdev_priv(ndev);
2065 dev = priv->dev;
2066 if (strncmp(name, ndev->name, count))
2067 continue;
2068 read_unlock_bh(&iucv_connection_rwlock);
2069 if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2070 PRINT_WARN("netiucv: net device %s active with peer "
2071 "%s\n", ndev->name, priv->conn->userid);
2072 PRINT_WARN("netiucv: %s cannot be removed\n",
2073 ndev->name);
2074 IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2075 return -EBUSY;
2076 }
2077 unregister_netdev(ndev);
2078 netiucv_unregister_device(dev);
2079 return count;
2080 }
2081 read_unlock_bh(&iucv_connection_rwlock);
2082 PRINT_WARN("netiucv: net device %s unknown\n", name);
2083 IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2084 return -EINVAL;
2085 }
2086
2087 static DRIVER_ATTR(remove, 0200, NULL, remove_write);
2088
2089 static struct attribute * netiucv_drv_attrs[] = {
2090 &driver_attr_connection.attr,
2091 &driver_attr_remove.attr,
2092 NULL,
2093 };
2094
2095 static struct attribute_group netiucv_drv_attr_group = {
2096 .attrs = netiucv_drv_attrs,
2097 };
2098
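/*
 * Typical use of the driver attributes created above (illustrative only;
 * the paths assume the iucv bus is named "iucv", this driver "netiucv",
 * and that the first interface is iucv0 with device name netiucv0):
 *
 *	echo USERID > /sys/bus/iucv/drivers/netiucv/connection
 *	echo 16384  > /sys/bus/iucv/devices/netiucv0/buffer
 *	echo iucv0  > /sys/bus/iucv/drivers/netiucv/remove
 *
 * Per-connection statistics are exported below
 * /sys/bus/iucv/devices/netiucv0/stats/.
 */
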
2099 static void netiucv_banner(void)
2100 {
2101 PRINT_INFO("NETIUCV driver initialized\n");
2102 }
2103
2104 static void __exit netiucv_exit(void)
2105 {
2106 struct iucv_connection *cp;
2107 struct net_device *ndev;
2108 struct netiucv_priv *priv;
2109 struct device *dev;
2110
2111 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2112 while (!list_empty(&iucv_connection_list)) {
2113 cp = list_entry(iucv_connection_list.next,
2114 struct iucv_connection, list);
2115 list_del(&cp->list);
2116 ndev = cp->netdev;
2117 priv = netdev_priv(ndev);
2118 dev = priv->dev;
2119
2120 unregister_netdev(ndev);
2121 netiucv_unregister_device(dev);
2122 }
2123
2124 sysfs_remove_group(&netiucv_driver.kobj, &netiucv_drv_attr_group);
2125 driver_unregister(&netiucv_driver);
2126 iucv_unregister(&netiucv_handler, 1);
2127 iucv_unregister_dbf_views();
2128
2129 PRINT_INFO("NETIUCV driver unloaded\n");
2130 return;
2131 }
2132
2133 static int __init netiucv_init(void)
2134 {
2135 int rc;
2136
2137 rc = iucv_register_dbf_views();
2138 if (rc)
2139 goto out;
2140 rc = iucv_register(&netiucv_handler, 1);
2141 if (rc)
2142 goto out_dbf;
2143 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2144 rc = driver_register(&netiucv_driver);
2145 if (rc) {
2146 PRINT_ERR("NETIUCV: failed to register driver.\n");
2147 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2148 goto out_iucv;
2149 }
2150
2151 rc = sysfs_create_group(&netiucv_driver.kobj, &netiucv_drv_attr_group);
2152 if (rc) {
2153 PRINT_ERR("NETIUCV: failed to add driver attributes.\n");
2154 IUCV_DBF_TEXT_(setup, 2,
2155 "ret %d - netiucv_drv_attr_group\n", rc);
2156 goto out_driver;
2157 }
2158 netiucv_banner();
2159 return rc;
2160
2161 out_driver:
2162 driver_unregister(&netiucv_driver);
2163 out_iucv:
2164 iucv_unregister(&netiucv_handler, 1);
2165 out_dbf:
2166 iucv_unregister_dbf_views();
2167 out:
2168 return rc;
2169 }
2170
2171 module_init(netiucv_init);
2172 module_exit(netiucv_exit);
2173 MODULE_LICENSE("GPL");