// SPDX-License-Identifier: GPL-2.0-only
/*
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright IBM Corp. 2006, 2009
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *  PM functions:
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/filter.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>
#define VERSION "1.2"

static char iucv_userid[80];

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static struct iucv_interface *pr_iucv;
static struct iucv_handler af_iucv_handler;
/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE	sizeof_field(struct iucv_message, class)
#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})
static struct sock *iucv_accept_dequeue(struct sock *parent,
					struct socket *newsock);
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify);
static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};
static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}
/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xff.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}
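
/* Worked example (illustrative only, not part of the original source): an
 * IPRM message carrying 5 bytes of socket data stores 0xff - 5 = 0xfa in
 * PRMDATA[7], so iucv_msg_length() returns 0xff - 0xfa = 5.  The special
 * iprm_shutdown message above stores 0x01, i.e. an encoded length of 0xfe
 * (> 7), so the function returns 8 and the receive path can inspect all
 * eight PRMDATA bytes (see iucv_process_message()).
 */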
/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}
/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		return (atomic_read(&iucv->skbs_in_xmit) < iucv->path->msglim);
	else
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}
/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 * @sk:		sock structure
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 * @imsg:	the iucv message
 * @sock:	the socket
 * @skb:	the socket buffer
 * @flags:	the AF_IUCV control flags
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	int err, confirm_recv = 0;

	phs_hdr = skb_push(skb, sizeof(*phs_hdr));
	memset(phs_hdr, 0, sizeof(*phs_hdr));
	skb_reset_network_header(skb);

	phs_hdr->magic = ETH_P_AF_IUCV;
	phs_hdr->version = 1;
	phs_hdr->flags = flags;
	if (flags == AF_IUCV_FLAG_SYN)
		phs_hdr->window = iucv->msglimit;
	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
		confirm_recv = atomic_read(&iucv->msg_recv);
		phs_hdr->window = confirm_recv;
		if (confirm_recv)
			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
	}
	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
	if (imsg)
		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

	skb->dev = iucv->hs_dev;
	if (!skb->dev) {
		err = -ENODEV;
		goto err_free;
	}

	dev_hard_header(skb, skb->dev, ETH_P_AF_IUCV, NULL, NULL, skb->len);

	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
		err = -ENETDOWN;
		goto err_free;
	}
	if (skb->len > skb->dev->mtu) {
		if (sock->sk_type == SOCK_SEQPACKET) {
			err = -EMSGSIZE;
			goto err_free;
		}
		err = pskb_trim(skb, skb->dev->mtu);
		if (err)
			goto err_free;
	}
	skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);

	atomic_inc(&iucv->skbs_in_xmit);
	err = dev_queue_xmit(skb);
	if (net_xmit_eval(err)) {
		atomic_dec(&iucv->skbs_in_xmit);
	} else {
		atomic_sub(confirm_recv, &iucv->msg_recv);
		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
	}
	return net_xmit_eval(err);

err_free:
	kfree_skb(skb);
	return err;
}
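
/* For reference, a sketch of the resulting on-wire frame (derived from the
 * code above; not an authoritative wire-format specification):
 *
 *	+--------------+---------------------------+-----------------+
 *	| ethernet hdr | struct af_iucv_trans_hdr  | message payload |
 *	| (built by    | magic/version/flags/window|                 |
 *	| dev_hard_-   | src/dst IDs in EBCDIC     |                 |
 *	| header())    | embedded iucv_message hdr |                 |
 *	+--------------+---------------------------+-----------------+
 *
 * The window field either advertises the local msglimit (SYN) or returns
 * consumed receive credits (WIN and plain data frames).
 */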
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;

	sk_for_each(sk, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}
static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_error_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive iucv socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);
}
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}
static void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

static void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}
/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_path *path = iucv->path;

	if (iucv->path) {
		iucv->path = NULL;
		if (with_user_data) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(path, user_data);
		} else
			pr_iucv->path_sever(path, NULL);
		iucv_path_free(path);
	}
}
/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	int err = 0;
	int blen;
	struct sk_buff *skb;
	u8 shutdown = 0;

	blen = sizeof(struct af_iucv_trans_hdr) +
	       LL_RESERVED_SPACE(iucv->hs_dev);
	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		/* controlling flags should be sent anyway */
		shutdown = sk->sk_shutdown;
		sk->sk_shutdown &= RCV_SHUTDOWN;
	}
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (skb) {
		skb_reserve(skb, blen);
		err = afiucv_hs_send(NULL, sk, skb, flags);
	}
	if (shutdown)
		sk->sk_shutdown = shutdown;
	return err;
}
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err = 0;

	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
		fallthrough;

	case IUCV_DISCONN:
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!err && atomic_read(&iucv->skbs_in_xmit) > 0) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
					iucv_sock_in_state(sk, IUCV_CLOSED, 0),
					timeo);
		}
		fallthrough;

	case IUCV_CLOSING:
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		fallthrough;

	default:
		iucv_sever_path(sk, 1);
	}

	if (iucv->hs_dev) {
		dev_put(iucv->hs_dev);
		iucv->hs_dev = NULL;
		sk->sk_bound_dev_if = 0;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}
static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent) {
		sk->sk_type = parent->sk_type;
		security_sk_clone(parent, sk);
	}
}
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern)
{
	struct sock *sk;
	struct iucv_sock *iucv;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
	if (!sk)
		return NULL;
	iucv = iucv_sk(sk);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv->accept_q);
	spin_lock_init(&iucv->accept_q_lock);
	skb_queue_head_init(&iucv->send_skb_q);
	INIT_LIST_HEAD(&iucv->message_q.list);
	spin_lock_init(&iucv->message_q.lock);
	skb_queue_head_init(&iucv->backlog_skb_q);
	iucv->send_tag = 0;
	atomic_set(&iucv->pendings, 0);
	iucv->flags = 0;
	iucv->msglimit = 0;
	atomic_set(&iucv->skbs_in_xmit, 0);
	atomic_set(&iucv->msg_sent, 0);
	atomic_set(&iucv->msg_recv, 0);
	iucv->path = NULL;
	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
	memset(&iucv->init, 0, sizeof(iucv->init));
	if (pr_iucv)
		iucv->transport = AF_IUCV_TRANS_IUCV;
	else
		iucv->transport = AF_IUCV_TRANS_HIPER;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state	= IUCV_OPEN;

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}
static void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

static void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}
static struct sock *iucv_accept_dequeue(struct sock *parent,
					struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_DISCONN ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}
static void __iucv_auto_name(struct iucv_sock *iucv)
{
	char name[12];

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}
	memcpy(iucv->src_name, name, 8);
}
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
	char uid[sizeof(sa->siucv_user_id)];
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err = 0;
	struct net_device *dev;

	/* Verify the input sockaddr */
	if (addr_len < sizeof(struct sockaddr_iucv) ||
	    addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path)
		goto done_unlock;

	/* Bind the socket */
	if (pr_iucv)
		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
			goto vm_bind; /* VM IUCV transport */

	/* try hiper transport */
	memcpy(uid, sa->siucv_user_id, sizeof(uid));
	ASCEBC(uid, 8);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!memcmp(dev->perm_addr, uid, 8)) {
			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
			/* Check for uninitialized siucv_name */
			if (strncmp(sa->siucv_name, "        ", 8) == 0)
				__iucv_auto_name(iucv);
			else
				memcpy(iucv->src_name, sa->siucv_name, 8);
			sk->sk_bound_dev_if = dev->ifindex;
			iucv->hs_dev = dev;
			dev_hold(dev);
			sk->sk_state = IUCV_BOUND;
			iucv->transport = AF_IUCV_TRANS_HIPER;
			if (!iucv->msglimit)
				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
			rcu_read_unlock();
			goto done_unlock;
		}
	}
	rcu_read_unlock();
vm_bind:
	if (pr_iucv) {
		/* use local userid for backward compat */
		memcpy(iucv->src_name, sa->siucv_name, 8);
		memcpy(iucv->src_user_id, iucv_userid, 8);
		sk->sk_state = IUCV_BOUND;
		iucv->transport = AF_IUCV_TRANS_IUCV;
		sk->sk_allocation |= GFP_DMA;
		if (!iucv->msglimit)
			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
		goto done_unlock;
	}
	/* found no dev to bind */
	err = -ENODEV;
done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}
/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	int err = 0;

	if (unlikely(!pr_iucv))
		return -EPROTO;

	memcpy(iucv->src_user_id, iucv_userid, 8);
	iucv->transport = AF_IUCV_TRANS_IUCV;
	sk->sk_allocation |= GFP_DMA;

	write_lock_bh(&iucv_sk_list.lock);
	__iucv_auto_name(iucv);
	write_unlock_bh(&iucv_sk_list.lock);

	if (!iucv->msglimit)
		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

	return err;
}
static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
	DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned char user_data[16];
	int err;

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
				    sa->siucv_user_id, NULL, user_data,
				    sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
	}
done:
	return err;
}
/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;

	if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV)
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_state == IUCV_OPEN &&
	    iucv->transport == AF_IUCV_TRANS_HIPER)
		return -EBADFD; /* explicit bind required */

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
	else
		err = afiucv_path_connect(sock, addr);
	if (err)
		goto done;

	if (sk->sk_state != IUCV_CONNECTED)
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
		err = -ECONNREFUSED;

	if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
		iucv_sever_path(sk, 0);

done:
	release_sock(sk);
	return err;
}
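
/* Example (hypothetical userspace sketch, not part of this file): connecting
 * a stream socket to application "APPSRV  " on z/VM guest "VMGUEST1" (both
 * names blank-padded to 8 bytes):
 *
 *	struct sockaddr_iucv sa = { .siucv_family = AF_IUCV };
 *	memcpy(sa.siucv_user_id, "VMGUEST1", 8);
 *	memcpy(sa.siucv_name, "APPSRV  ", 8);
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * With the VM IUCV transport, connect() autobinds an unbound socket; with
 * HiperSockets an explicit bind() to a local device address is required
 * first (see the -EBADFD check above).
 */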
/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}
/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags, bool kern)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int peer)
{
	DECLARE_SOCKADDR(struct sockaddr_iucv *, siucv, addr);
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);

	addr->sa_family = AF_IUCV;

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return sizeof(struct sockaddr_iucv);
}
/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
				 (void *) prmdata, 8);
}
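
/* Example (illustrative only): for a 3-byte payload "abc", prmdata becomes
 * { 'a', 'b', 'c', ?, ?, ?, ?, 0xfc }: bytes 3..6 are don't-care padding,
 * and 0xff - 3 = 0xfc in the last byte encodes the length, matching the
 * convention documented at iucv_msg_length().
 */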
static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			     size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	size_t headroom = 0;
	size_t linear;
	struct sk_buff *skb;
	struct iucv_message txmsg = {0};
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done   = 0;	/* check for duplicate headers */

	/* iterate over control messages */
	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
			       (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

			break;

		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		headroom = sizeof(struct af_iucv_trans_hdr) +
			   LL_RESERVED_SPACE(iucv->hs_dev);
		linear = min(len, PAGE_SIZE - headroom);
	} else {
		if (len < PAGE_SIZE) {
			linear = len;
		} else {
			/* In nonlinear "classic" iucv skb,
			 * reserve space for iucv_array
			 */
			headroom = sizeof(struct iucv_array) *
				   (MAX_SKB_FRAGS + 1);
			linear = PAGE_SIZE - headroom;
		}
	}
	skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
				   noblock, &err, 0);
	if (!skb)
		goto out;
	if (headroom)
		skb_reserve(skb, headroom);
	skb_put(skb, linear);
	skb->len = len;
	skb->data_len = len - linear;
	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
	if (err)
		goto fail;

	/* wait if the limit of outstanding messages for the iucv path
	 * has been reached */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	IUCV_SKB_CB(skb)->tag = txmsg.tag;

	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		atomic_inc(&iucv->msg_sent);
		err = afiucv_hs_send(&txmsg, sk, skb, 0);
		if (err) {
			atomic_dec(&iucv->msg_sent);
			goto out;
		}
	} else { /* Classic VM IUCV transport */
		skb_queue_tail(&iucv->send_skb_q, skb);
		atomic_inc(&iucv->skbs_in_xmit);

		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
		    skb->len <= 7) {
			err = iucv_send_iprm(iucv->path, &txmsg, skb);

			/* on success: there is no message_complete callback */
			/* for an IPRMDATA msg; remove skb from send queue */
			if (err == 0) {
				atomic_dec(&iucv->skbs_in_xmit);
				skb_unlink(skb, &iucv->send_skb_q);
				consume_skb(skb);
			}

			/* this error should never happen since the	*/
			/* IUCV_IPRMDATA path flag is set... sever path */
			if (err == 0x15) {
				pr_iucv->path_sever(iucv->path, NULL);
				atomic_dec(&iucv->skbs_in_xmit);
				skb_unlink(skb, &iucv->send_skb_q);
				err = -EPIPE;
				goto fail;
			}
		} else if (skb_is_nonlinear(skb)) {
			struct iucv_array *iba = (struct iucv_array *)skb->head;
			int i;

			/* skip iucv_array lying in the headroom */
			iba[0].address = (u32)(addr_t)skb->data;
			iba[0].length = (u32)skb_headlen(skb);
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

				iba[i + 1].address =
					(u32)(addr_t)skb_frag_address(frag);
				iba[i + 1].length = (u32)skb_frag_size(frag);
			}
			err = pr_iucv->message_send(iucv->path, &txmsg,
						    IUCV_IPBUFLST, 0,
						    (void *)iba, skb->len);
		} else { /* non-IPRM Linear skb */
			err = pr_iucv->message_send(iucv->path, &txmsg,
					0, 0, (void *)skb->data, skb->len);
		}
		if (err) {
			if (err == 3) {
				user_id[8] = 0;
				memcpy(user_id, iucv->dst_user_id, 8);
				appl_id[8] = 0;
				memcpy(appl_id, iucv->dst_name, 8);
				pr_err(
		"Application %s on z/VM guest %s exceeds message limit\n",
					appl_id, user_id);
				err = -EAGAIN;
			} else {
				err = -EPIPE;
			}

			atomic_dec(&iucv->skbs_in_xmit);
			skb_unlink(skb, &iucv->send_skb_q);
			goto fail;
		}
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}
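
/* Example (hypothetical userspace sketch): tagging an outgoing message with
 * an IUCV target class via SCM_IUCV_TRGCLS, which iucv_sock_sendmsg() above
 * copies into txmsg.class:
 *
 *	char cbuf[CMSG_SPACE(sizeof(__u32))];
 *	struct iovec iov = { .iov_base = "hello", .iov_len = 5 };
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1,
 *			     .msg_control = cbuf,
 *			     .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
 *	cm->cmsg_level = SOL_IUCV;
 *	cm->cmsg_type = SCM_IUCV_TRGCLS;
 *	cm->cmsg_len = CMSG_LEN(sizeof(__u32));
 *	*(__u32 *)CMSG_DATA(cm) = 0x42;		// target class
 *	sendmsg(fd, &mh, 0);
 */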
static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
{
	size_t headroom, linear;
	struct sk_buff *skb;
	int err;

	if (len < PAGE_SIZE) {
		headroom = 0;
		linear = len;
	} else {
		headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
		linear = PAGE_SIZE - headroom;
	}
	skb = alloc_skb_with_frags(headroom + linear, len - linear,
				   0, &err, GFP_ATOMIC | GFP_DMA);
	WARN_ONCE(!skb,
		  "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
		  len, err);
	if (skb) {
		if (headroom)
			skb_reserve(skb, headroom);
		skb_put(skb, linear);
		skb->len = len;
		skb->data_len = len - linear;
	}
	return skb;
}
/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	IUCV_SKB_CB(skb)->class = msg->class;

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		if (skb_is_nonlinear(skb)) {
			struct iucv_array *iba = (struct iucv_array *)skb->head;
			int i;

			iba[0].address = (u32)(addr_t)skb->data;
			iba[0].length = (u32)skb_headlen(skb);
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

				iba[i + 1].address =
					(u32)(addr_t)skb_frag_address(frag);
				iba[i + 1].length = (u32)skb_frag_size(frag);
			}
			rc = pr_iucv->message_receive(path, msg,
					      IUCV_IPBUFLST,
					      (void *)iba, len, NULL);
		} else {
			rc = pr_iucv->message_receive(path, msg,
					      msg->flags & IUCV_IPRMDATA,
					      skb->data, len, NULL);
		}
		if (rc) {
			kfree_skb(skb);
			return;
		}
		WARN_ON_ONCE(skb->len != len);
	}

	IUCV_SKB_CB(skb)->offset = 0;
	if (sk_filter(sk, skb)) {
		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
		kfree_skb(skb);
		return;
	}
	if (__sock_queue_rcv_skb(sk, skb))	/* handle rcv queue full */
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
}
/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}
static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			     size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;
	u32 offset;

	if ((sk->sk_state == IUCV_DISCONN) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	offset = IUCV_SKB_CB(skb)->offset;
	rlen   = skb->len - offset;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);
	if (!rlen)
		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;

	cskb = skb;
	if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       sizeof(IUCV_SKB_CB(skb)->class),
		       (void *)&IUCV_SKB_CB(skb)->class);
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			if (copied < rlen) {
				IUCV_SKB_CB(skb)->offset = offset + copied;
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		consume_skb(skb);
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			atomic_inc(&iucv->msg_recv);
			if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
				WARN_ON(1);
				iucv_sock_close(sk);
				return -EFAULT;
			}
		}

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			IUCV_SKB_CB(rskb)->offset = 0;
			if (__sock_queue_rcv_skb(sk, rskb)) {
				/* handle rcv queue full */
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			}
			rskb = skb_dequeue(&iucv->backlog_skb_q);
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			if (atomic_read(&iucv->msg_recv) >=
							iucv->msglimit / 2) {
				err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
				if (err) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
			}
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}
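
/* Flow-control note (summarizing the logic above): on the HiperSockets
 * transport each received message consumes one credit (msg_recv).  Once the
 * receiver has drained its backlog and at least msglimit/2 credits are
 * outstanding, it returns them in a single AF_IUCV_FLAG_WIN frame, which the
 * sender's afiucv_hs_callback_win() subtracts from msg_sent again.
 */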
static inline __poll_t iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return EPOLLIN | EPOLLRDNORM;
	}

	return 0;
}
static __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
			       poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;

	sock_poll_wait(file, sock, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= EPOLLHUP;

	if (sk->sk_state == IUCV_DISCONN)
		mask |= EPOLLIN;

	if (sock_writeable(sk) && iucv_below_msglim(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_LISTEN:
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;
	default:
		break;
	}

	if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) &&
	    sk->sk_state == IUCV_CONNECTED) {
		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
			txmsg.class = 0;
			txmsg.tag = 0;
			err = pr_iucv->message_send(iucv->path, &txmsg,
				IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
			if (err) {
				switch (err) {
				case 1:
					err = -ENOTCONN;
					break;
				case 2:
					err = -ECONNRESET;
					break;
				default:
					err = -ENOTCONN;
					break;
				}
			}
		} else
			iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
	}

	sk->sk_shutdown |= how;
	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
		    iucv->path) {
			err = pr_iucv->path_quiesce(iucv->path, NULL);
			if (err)
				err = -ENOTCONN;
/*			skb_queue_purge(&sk->sk_receive_queue); */
		}
		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}
static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}
/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(int)))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > U16_MAX)
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}
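
/* Example (hypothetical userspace sketch): raising the message limit before
 * the socket is bound or connected, per the state check above:
 *
 *	int lim = 32;
 *	setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &lim, sizeof(lim));
 */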
static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val, len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	case SO_MSGSIZE:
		if (sk->sk_state == IUCV_OPEN)
			return -EBADFD;
		val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
				sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
				0x7fffffff;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
	if (!nsk) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);
	niucv->transport = AF_IUCV_TRANS_IUCV;
	nsk->sk_allocation |= GFP_DMA;

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		iucv_sever_path(nsk, 1);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}
static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		pr_iucv->message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += SKB_TRUESIZE(iucv_msg_length(msg));
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}
static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list;
	struct sk_buff *list_skb;
	struct iucv_sock *iucv;
	unsigned long flags;

	iucv = iucv_sk(sk);
	list = &iucv->send_skb_q;

	bh_lock_sock(sk);

	spin_lock_irqsave(&list->lock, flags);
	skb_queue_walk(list, list_skb) {
		if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
			this = list_skb;
			break;
		}
	}
	if (this) {
		atomic_dec(&iucv->skbs_in_xmit);
		__skb_unlink(this, list);
	}

	spin_unlock_irqrestore(&list->lock, flags);

	if (this) {
		consume_skb(this);
		/* wake up any process waiting for sending */
		iucv_sock_wake_msglim(sk);
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (atomic_read(&iucv->skbs_in_xmit) == 0) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
	bh_unlock_sock(sk);
}
static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (sk->sk_state == IUCV_CLOSED)
		return;

	bh_lock_sock(sk);
	iucv_sever_path(sk, 1);
	sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
}
/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}
static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};
/***************** HiperSockets transport callbacks ********************/
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
	struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
	char tmpID[8];
	char tmpName[8];

	ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	memcpy(tmpID, trans_hdr->srcUserID, 8);
	memcpy(tmpName, trans_hdr->srcAppName, 8);
	memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
	memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
	memcpy(trans_hdr->destUserID, tmpID, 8);
	memcpy(trans_hdr->destAppName, tmpName, 8);
	skb_push(skb, ETH_HLEN);
	memset(skb->data, 0, ETH_HLEN);
}
/**
 * afiucv_hs_callback_syn - react on received SYN
 * @sk: the listening socket
 * @skb: the received skb
 */
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
	struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
	struct sock *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	iucv = iucv_sk(sk);
	if (!iucv) {
		/* no sock - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		goto out;
	}

	nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
	bh_lock_sock(sk);
	if ((sk->sk_state != IUCV_LISTEN) ||
	    sk_acceptq_is_full(sk) ||
	    !nsk) {
		/* error on server socket - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		iucv_sock_kill(nsk);
		bh_unlock_sock(sk);
		goto out;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);
	niucv->transport = AF_IUCV_TRANS_HIPER;
	niucv->msglimit = iucv->msglimit;
	if (!trans_hdr->window)
		niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
	else
		niucv->msglimit_peer = trans_hdr->window;
	memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
	memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
	niucv->hs_dev = iucv->hs_dev;
	dev_hold(niucv->hs_dev);
	afiucv_swap_src_dest(skb);
	trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
	trans_hdr->window = niucv->msglimit;
	/* if receiver acks the xmit connection is established */
	err = dev_queue_xmit(skb);
	if (!err) {
		iucv_accept_enqueue(sk, nsk);
		nsk->sk_state = IUCV_CONNECTED;
		sk->sk_data_ready(sk);
	} else
		iucv_sock_kill(nsk);
	bh_unlock_sock(sk);

out:
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_synack() - react on received SYN-ACK
 * @sk: the connecting socket
 * @skb: the received skb
 */
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv || sk->sk_state != IUCV_BOUND) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	bh_lock_sock(sk);
	iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
	consume_skb(skb);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_synfin() - react on received SYN_FIN
 * @sk: the connecting socket
 * @skb: the received skb
 */
static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv || sk->sk_state != IUCV_BOUND) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	bh_lock_sock(sk);
	sk->sk_state = IUCV_DISCONN;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
	consume_skb(skb);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_fin() - react on received FIN
 * @sk: the connected socket
 * @skb: the received skb
 */
static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	/* other end of connection closed */
	if (!iucv) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	bh_lock_sock(sk);
	if (sk->sk_state == IUCV_CONNECTED) {
		sk->sk_state = IUCV_DISCONN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
	consume_skb(skb);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_win() - react on received WIN
 * @sk: the connected socket
 * @skb: the received skb
 */
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv)
		return NET_RX_SUCCESS;

	if (sk->sk_state != IUCV_CONNECTED)
		return NET_RX_SUCCESS;

	atomic_sub(iucv_trans_hdr(skb)->window, &iucv->msg_sent);
	iucv_sock_wake_msglim(sk);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_rx() - react on received data
 * @sk: the connected socket
 * @skb: the received skb
 */
static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	/* write stuff from iucv_msg to skb cb */
	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	IUCV_SKB_CB(skb)->offset = 0;
	if (sk_filter(sk, skb)) {
		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	spin_lock(&iucv->message_q.lock);
	if (skb_queue_empty(&iucv->backlog_skb_q)) {
		if (__sock_queue_rcv_skb(sk, skb))
			/* handle rcv queue full */
			skb_queue_tail(&iucv->backlog_skb_q, skb);
	} else
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
	spin_unlock(&iucv->message_q.lock);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
 *		     transport
 * @skb: the receiving socket buffer
 * @dev: the net device
 * @pt: packet type
 * @orig_dev: the original receiving net device
 *
 * called from netif RX softirq
 */
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
			 struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct iucv_sock *iucv;
	struct af_iucv_trans_hdr *trans_hdr;
	int err = NET_RX_SUCCESS;
	char nullstring[8];

	if (!pskb_may_pull(skb, sizeof(*trans_hdr))) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	trans_hdr = iucv_trans_hdr(skb);
	EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	memset(nullstring, 0, sizeof(nullstring));
	iucv = NULL;
	sk = NULL;
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     nullstring, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		} else {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name,
				     trans_hdr->srcAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     trans_hdr->srcUserID, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		}
	}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		sk = NULL;

	/* no sock
	how should we send with no sock
	1) send without sock no send rc checking?
	2) introduce default sock to handle this cases

	 SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
	 data -> send FIN
	 SYN|ACK, SYN|FIN, FIN -> no action? */

	switch (trans_hdr->flags) {
	case AF_IUCV_FLAG_SYN:
		/* connect request */
		err = afiucv_hs_callback_syn(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
		/* connect request confirmed */
		err = afiucv_hs_callback_synack(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
		/* connect request refused */
		err = afiucv_hs_callback_synfin(sk, skb);
		break;
	case (AF_IUCV_FLAG_FIN):
		/* close request */
		err = afiucv_hs_callback_fin(sk, skb);
		break;
	case (AF_IUCV_FLAG_WIN):
		err = afiucv_hs_callback_win(sk, skb);
		if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
			consume_skb(skb);
			break;
		}
		fallthrough;	/* and receive non-zero length data */
	case (AF_IUCV_FLAG_SHT):
		/* shutdown request */
		fallthrough;	/* and receive zero length data */
	case 0:
		/* plain data frame */
		IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
		err = afiucv_hs_callback_rx(sk, skb);
		break;
	default:
		kfree_skb(skb);
	}

	return err;
}
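
/* Handshake summary (derived from the switch above): a connecting socket
 * sends SYN carrying its msglimit in the window field; the listener replies
 * with SYN|ACK (accept) or SYN|FIN (refuse).  During the connection, WIN
 * frames return receive credits, FIN closes the connection, and SHT signals
 * that the peer shut down its send direction.
 */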
/**
 * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
 *                                 transport
 * @sk:  iucv socket
 * @n:   notification type
 */
static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify n)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sock_flag(sk, SOCK_ZAPPED))
		return;

	switch (n) {
	case TX_NOTIFY_OK:
		atomic_dec(&iucv->skbs_in_xmit);
		iucv_sock_wake_msglim(sk);
		break;
	case TX_NOTIFY_PENDING:
		atomic_inc(&iucv->pendings);
		break;
	case TX_NOTIFY_DELAYED_OK:
		atomic_dec(&iucv->skbs_in_xmit);
		if (atomic_dec_return(&iucv->pendings) <= 0)
			iucv_sock_wake_msglim(sk);
		break;
	default:
		atomic_dec(&iucv->skbs_in_xmit);
		if (sk->sk_state == IUCV_CONNECTED) {
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (atomic_read(&iucv->skbs_in_xmit) == 0) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}
/*
 * afiucv_netdev_event: handle netdev notifier chain events
 */
static int afiucv_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
	struct sock *sk;
	struct iucv_sock *iucv;

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_GOING_DOWN:
		sk_for_each(sk, &iucv_sk_list.head) {
			iucv = iucv_sk(sk);
			if ((iucv->hs_dev == event_dev) &&
			    (sk->sk_state == IUCV_CONNECTED)) {
				if (event == NETDEV_GOING_DOWN)
					iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
				sk->sk_state = IUCV_DISCONN;
				sk->sk_state_change(sk);
			}
		}
		break;
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
	default:
		break;
	}
	return NOTIFY_DONE;
}
static struct notifier_block afiucv_netdev_notifier = {
	.notifier_call = afiucv_netdev_event,
};
static const struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}
static const struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static struct packet_type iucv_packet_type = {
	.type = cpu_to_be16(ETH_P_AF_IUCV),
	.func = afiucv_hs_rcv,
};
static int __init afiucv_init(void)
{
	int err;

	if (MACHINE_IS_VM && IS_ENABLED(CONFIG_IUCV)) {
		cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
		if (unlikely(err)) {
			WARN_ON(err);
			err = -EPROTONOSUPPORT;
			goto out;
		}

		pr_iucv = &iucv_if;
	} else {
		memset(&iucv_userid, 0, sizeof(iucv_userid));
		pr_iucv = NULL;
	}

	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;

	if (pr_iucv) {
		err = pr_iucv->iucv_register(&af_iucv_handler, 0);
		if (err)
			goto out_sock;
	}

	err = register_netdevice_notifier(&afiucv_netdev_notifier);
	if (err)
		goto out_notifier;

	dev_add_pack(&iucv_packet_type);
	return 0;

out_notifier:
	if (pr_iucv)
		pr_iucv->iucv_unregister(&af_iucv_handler, 0);
out_sock:
	sock_unregister(PF_IUCV);
out_proto:
	proto_unregister(&iucv_proto);
out:
	return err;
}
static void __exit afiucv_exit(void)
{
	if (pr_iucv)
		pr_iucv->iucv_unregister(&af_iucv_handler, 0);

	unregister_netdevice_notifier(&afiucv_netdev_notifier);
	dev_remove_pack(&iucv_packet_type);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);