// SPDX-License-Identifier: GPL-2.0-only
/*
 * common code for virtio vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/virtio_vsock.h>
#include <uapi/linux/vsockmon.h>

#include <net/sock.h>
#include <net/af_vsock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vsock_virtio_transport_common.h>

/* How long to wait for graceful shutdown of a connection */
#define VSOCK_CLOSE_TIMEOUT (8 * HZ)

/* Threshold for detecting small packets to copy */
#define GOOD_COPY_LEN	128

static const struct virtio_transport *
virtio_transport_get_ops(struct vsock_sock *vsk)
{
	const struct vsock_transport *t = vsock_core_get_transport(vsk);

	if (WARN_ON(!t))
		return NULL;

	return container_of(t, struct virtio_transport, transport);
}

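/* Decide whether a MSG_ZEROCOPY send can really run without copying:
 * the iov must start at offset 0 and fit entirely into one packet, and
 * the transport, if it implements ->can_msgzerocopy, must accept all
 * the pages plus the packet header.
 */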
static bool virtio_transport_can_zcopy(const struct virtio_transport *t_ops,
				       struct virtio_vsock_pkt_info *info,
				       size_t pkt_len)
{
	struct iov_iter *iov_iter;

	if (!info->msg)
		return false;

	iov_iter = &info->msg->msg_iter;

	if (iov_iter->iov_offset)
		return false;

	/* We can't send the whole iov in a single packet. */
	if (iov_iter->count > pkt_len)
		return false;

	/* Check that the transport can send data in zerocopy mode. */
	t_ops = virtio_transport_get_ops(info->vsk);

	if (t_ops->can_msgzerocopy) {
		int pages_to_send = iov_iter_npages(iov_iter, MAX_SKB_FRAGS);

		/* +1 is for the packet header. */
		return t_ops->can_msgzerocopy(pages_to_send + 1);
	}

	return true;
}

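/* Attach a zerocopy completion context to 'skb': reuse the ubuf_info
 * supplied by the caller in msg_ubuf if there is one, otherwise
 * allocate a msg_zerocopy context so the sender is notified when the
 * pages can be reused.
 */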
static int virtio_transport_init_zcopy_skb(struct vsock_sock *vsk,
					   struct sk_buff *skb,
					   struct msghdr *msg,
					   bool zerocopy)
{
	struct ubuf_info *uarg;

	if (msg->msg_ubuf) {
		uarg = msg->msg_ubuf;
		net_zcopy_get(uarg);
	} else {
		struct iov_iter *iter = &msg->msg_iter;
		struct ubuf_info_msgzc *uarg_zc;

		uarg = msg_zerocopy_realloc(sk_vsock(vsk),
					    iter->count,
					    NULL);
		if (!uarg)
			return -1;

		uarg_zc = uarg_to_msgzc(uarg);
		uarg_zc->zerocopy = zerocopy ? 1 : 0;
	}

	skb_zcopy_init(skb, uarg);

	return 0;
}

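/* Fill the skb payload: pin the user pages into the skb frags in
 * zerocopy mode, or fall back to a plain copy from the msghdr.
 */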
static int virtio_transport_fill_skb(struct sk_buff *skb,
				     struct virtio_vsock_pkt_info *info,
				     size_t len,
				     bool zcopy)
{
	if (zcopy)
		return __zerocopy_sg_from_iter(info->msg, NULL, skb,
					       &info->msg->msg_iter,
					       len);

	return memcpy_from_msg(skb_put(skb, len), info->msg, len);
}

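/* Fill in the packet header. buf_alloc and fwd_cnt are zeroed here and
 * stamped with the current credit values in virtio_transport_inc_tx_pkt()
 * just before transmission.
 */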
static void virtio_transport_init_hdr(struct sk_buff *skb,
				      struct virtio_vsock_pkt_info *info,
				      size_t payload_len,
				      u32 src_cid,
				      u32 src_port,
				      u32 dst_cid,
				      u32 dst_port)
{
	struct virtio_vsock_hdr *hdr;

	hdr = virtio_vsock_hdr(skb);
	hdr->type = cpu_to_le16(info->type);
	hdr->op = cpu_to_le16(info->op);
	hdr->src_cid = cpu_to_le64(src_cid);
	hdr->dst_cid = cpu_to_le64(dst_cid);
	hdr->src_port = cpu_to_le32(src_port);
	hdr->dst_port = cpu_to_le32(dst_port);
	hdr->flags = cpu_to_le32(info->flags);
	hdr->len = cpu_to_le32(payload_len);
	hdr->buf_alloc = cpu_to_le32(0);
	hdr->fwd_cnt = cpu_to_le32(0);
}

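/* Copy up to 'len' payload bytes of a non-linear skb into the linear
 * buffer 'dst', starting from the skb's current read offset.
 */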
static void virtio_transport_copy_nonlinear_skb(const struct sk_buff *skb,
						void *dst,
						size_t len)
{
	struct iov_iter iov_iter = { 0 };
	struct kvec kvec;
	size_t to_copy;

	kvec.iov_base = dst;
	kvec.iov_len = len;

	iov_iter.iter_type = ITER_KVEC;
	iov_iter.kvec = &kvec;
	iov_iter.nr_segs = 1;

	to_copy = min_t(size_t, len, skb->len);

	skb_copy_datagram_iter(skb, VIRTIO_VSOCK_SKB_CB(skb)->offset,
			       &iov_iter, to_copy);
}

/* Packet capture */
static struct sk_buff *virtio_transport_build_skb(void *opaque)
{
	struct virtio_vsock_hdr *pkt_hdr;
	struct sk_buff *pkt = opaque;
	struct af_vsockmon_hdr *hdr;
	struct sk_buff *skb;
	size_t payload_len;

	/* A packet could be split to fit the RX buffer, so we can retrieve
	 * the payload length from the header and the buffer pointer taking
	 * care of the offset in the original packet.
	 */
	pkt_hdr = virtio_vsock_hdr(pkt);
	payload_len = pkt->len;

	skb = alloc_skb(sizeof(*hdr) + sizeof(*pkt_hdr) + payload_len,
			GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, sizeof(*hdr));

	/* pkt->hdr is little-endian so no need to byteswap here */
	hdr->src_cid = pkt_hdr->src_cid;
	hdr->src_port = pkt_hdr->src_port;
	hdr->dst_cid = pkt_hdr->dst_cid;
	hdr->dst_port = pkt_hdr->dst_port;

	hdr->transport = cpu_to_le16(AF_VSOCK_TRANSPORT_VIRTIO);
	hdr->len = cpu_to_le16(sizeof(*pkt_hdr));
	memset(hdr->reserved, 0, sizeof(hdr->reserved));

	switch (le16_to_cpu(pkt_hdr->op)) {
	case VIRTIO_VSOCK_OP_REQUEST:
	case VIRTIO_VSOCK_OP_RESPONSE:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_CONNECT);
		break;
	case VIRTIO_VSOCK_OP_RST:
	case VIRTIO_VSOCK_OP_SHUTDOWN:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_DISCONNECT);
		break;
	case VIRTIO_VSOCK_OP_RW:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_PAYLOAD);
		break;
	case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
	case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_CONTROL);
		break;
	default:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_UNKNOWN);
		break;
	}

	skb_put_data(skb, pkt_hdr, sizeof(*pkt_hdr));

	if (payload_len) {
		if (skb_is_nonlinear(pkt)) {
			void *data = skb_put(skb, payload_len);

			virtio_transport_copy_nonlinear_skb(pkt, data, payload_len);
		} else {
			skb_put_data(skb, pkt->data, payload_len);
		}
	}

	return skb;
}

void virtio_transport_deliver_tap_pkt(struct sk_buff *skb)
{
	if (virtio_vsock_skb_tap_delivered(skb))
		return;

	vsock_deliver_tap(virtio_transport_build_skb, skb);
	virtio_vsock_skb_set_tap_delivered(skb);
}
EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt);

static u16 virtio_transport_get_type(struct sock *sk)
{
	if (sk->sk_type == SOCK_STREAM)
		return VIRTIO_VSOCK_TYPE_STREAM;
	else
		return VIRTIO_VSOCK_TYPE_SEQPACKET;
}

/* Returns new sk_buff on success, otherwise returns NULL. */
static struct sk_buff *virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
						  size_t payload_len,
						  bool zcopy,
						  u32 src_cid,
						  u32 src_port,
						  u32 dst_cid,
						  u32 dst_port)
{
	struct vsock_sock *vsk;
	struct sk_buff *skb;
	size_t skb_len;

	skb_len = VIRTIO_VSOCK_SKB_HEADROOM;

	if (!zcopy)
		skb_len += payload_len;

	skb = virtio_vsock_alloc_skb(skb_len, GFP_KERNEL);
	if (!skb)
		return NULL;

	virtio_transport_init_hdr(skb, info, payload_len, src_cid, src_port,
				  dst_cid, dst_port);

	vsk = info->vsk;

	/* If 'vsk' != NULL then payload is always present, so we
	 * will never call '__zerocopy_sg_from_iter()' below without
	 * setting skb owner in 'skb_set_owner_w()'. The only case
	 * when 'vsk' == NULL is VIRTIO_VSOCK_OP_RST control message
	 * without payload.
	 */
	WARN_ON_ONCE(!(vsk && (info->msg && payload_len)) && zcopy);

	/* Set owner here, because '__zerocopy_sg_from_iter()' uses
	 * owner of skb without check to update 'sk_wmem_alloc'.
	 */
	if (vsk)
		skb_set_owner_w(skb, sk_vsock(vsk));

	if (info->msg && payload_len > 0) {
		int err;

		err = virtio_transport_fill_skb(skb, info, payload_len, zcopy);
		if (err)
			goto out;

		if (msg_data_left(info->msg) == 0 &&
		    info->type == VIRTIO_VSOCK_TYPE_SEQPACKET) {
			struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);

			hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);

			if (info->msg->msg_flags & MSG_EOR)
				hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
		}
	}

	if (info->reply)
		virtio_vsock_skb_set_reply(skb);

	trace_virtio_transport_alloc_pkt(src_cid, src_port,
					 dst_cid, dst_port,
					 payload_len,
					 info->type,
					 info->op,
					 info->flags,
					 zcopy);

	return skb;
out:
	kfree_skb(skb);
	return NULL;
}

/* This function can only be used on connecting/connected sockets,
 * since a socket assigned to a transport is required.
 *
 * Do not use on listener sockets!
 */
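/* The buffer is split into packets of at most 'max_skb_len' bytes
 * each, and the requested length is first capped to the credit
 * currently granted by the peer, so fewer bytes than requested may be
 * sent.
 */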
static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
					  struct virtio_vsock_pkt_info *info)
{
	u32 max_skb_len = VIRTIO_VSOCK_MAX_PKT_BUF_SIZE;
	u32 src_cid, src_port, dst_cid, dst_port;
	const struct virtio_transport *t_ops;
	struct virtio_vsock_sock *vvs;
	u32 pkt_len = info->pkt_len;
	bool can_zcopy = false;
	u32 rest_len;
	int ret;

	info->type = virtio_transport_get_type(sk_vsock(vsk));

	t_ops = virtio_transport_get_ops(vsk);
	if (unlikely(!t_ops))
		return -EFAULT;

	src_cid = t_ops->transport.get_local_cid();
	src_port = vsk->local_addr.svm_port;
	if (!info->remote_cid) {
		dst_cid = vsk->remote_addr.svm_cid;
		dst_port = vsk->remote_addr.svm_port;
	} else {
		dst_cid = info->remote_cid;
		dst_port = info->remote_port;
	}

	vvs = vsk->trans;

	/* virtio_transport_get_credit() might return less than pkt_len credit */
	pkt_len = virtio_transport_get_credit(vvs, pkt_len);

	/* Do not send a zero-length OP_RW pkt */
	if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
		return pkt_len;

	if (info->msg) {
		/* If zerocopy is not enabled by 'setsockopt()', we behave as
		 * if there is no MSG_ZEROCOPY flag set.
		 */
		if (!sock_flag(sk_vsock(vsk), SOCK_ZEROCOPY))
			info->msg->msg_flags &= ~MSG_ZEROCOPY;

		if (info->msg->msg_flags & MSG_ZEROCOPY)
			can_zcopy = virtio_transport_can_zcopy(t_ops, info, pkt_len);

		if (can_zcopy)
			max_skb_len = min_t(u32, VIRTIO_VSOCK_MAX_PKT_BUF_SIZE,
					    (MAX_SKB_FRAGS * PAGE_SIZE));
	}

	rest_len = pkt_len;

	do {
		struct sk_buff *skb;
		size_t skb_len;

		skb_len = min(max_skb_len, rest_len);

		skb = virtio_transport_alloc_skb(info, skb_len, can_zcopy,
						 src_cid, src_port,
						 dst_cid, dst_port);
		if (!skb) {
			ret = -ENOMEM;
			break;
		}

		/* We process the buffer part by part, allocating an skb on
		 * each iteration. If this is the last skb for this buffer
		 * and MSG_ZEROCOPY mode is in use, we must allocate a
		 * completion for the current syscall.
		 */
		if (info->msg && info->msg->msg_flags & MSG_ZEROCOPY &&
		    skb_len == rest_len && info->op == VIRTIO_VSOCK_OP_RW) {
			if (virtio_transport_init_zcopy_skb(vsk, skb,
							    info->msg,
							    can_zcopy)) {
				ret = -ENOMEM;
				break;
			}
		}

		virtio_transport_inc_tx_pkt(vvs, skb);

		ret = t_ops->send_pkt(skb);
		if (ret < 0)
			break;

		/* Both virtio and vhost 'send_pkt()' return 'skb_len',
		 * but for reliability use 'ret' instead of 'skb_len'.
		 * Also if a partial send somehow happens (i.e. 'ret' !=
		 * 'skb_len'), we break this loop, but account the
		 * returned value in 'virtio_transport_put_credit()'.
		 */
		rest_len -= ret;

		if (WARN_ONCE(ret != skb_len,
			      "'send_pkt()' returns %i, but %zu expected\n",
			      ret, skb_len))
			break;
	} while (rest_len);

	virtio_transport_put_credit(vvs, rest_len);

	/* Return the number of bytes, if any data has been sent. */
	if (rest_len != pkt_len)
		ret = pkt_len - rest_len;

	return ret;
}

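/* Charge 'len' received payload bytes to the local receive buffer.
 * Returns false if accepting the packet would exceed buf_alloc, i.e.
 * the peer overran the credit it was granted.
 */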
static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
					u32 len)
{
	if (vvs->rx_bytes + len > vvs->buf_alloc)
		return false;

	vvs->rx_bytes += len;
	return true;
}

static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
					u32 len)
{
	vvs->rx_bytes -= len;
	vvs->fwd_cnt += len;
}

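/* Stamp an outgoing packet with the current credit state: fwd_cnt
 * (bytes consumed from our receive buffer) and buf_alloc (its size).
 * Every transmitted packet thus doubles as an implicit credit update.
 */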
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);

	spin_lock_bh(&vvs->rx_lock);
	vvs->last_fwd_cnt = vvs->fwd_cnt;
	hdr->fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
	hdr->buf_alloc = cpu_to_le32(vvs->buf_alloc);
	spin_unlock_bh(&vvs->rx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);

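/* Take up to 'credit' bytes from the send window. The window is
 * peer_buf_alloc - (tx_cnt - peer_fwd_cnt): the peer's buffer size
 * minus the bytes we have sent that the peer has not yet consumed.
 */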
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
	u32 ret;

	if (!credit)
		return 0;

	spin_lock_bh(&vvs->tx_lock);
	ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
	if (ret > credit)
		ret = credit;
	vvs->tx_cnt += ret;
	spin_unlock_bh(&vvs->tx_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(virtio_transport_get_credit);

void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
	if (!credit)
		return;

	spin_lock_bh(&vvs->tx_lock);
	vvs->tx_cnt -= credit;
	spin_unlock_bh(&vvs->tx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_put_credit);

static int virtio_transport_send_credit_update(struct vsock_sock *vsk)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}

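/* Copy up to 'len' bytes to the caller without dequeueing: walk the
 * rx_queue, dropping rx_lock around each copy because
 * skb_copy_datagram_iter() may sleep; sk_lock prevents concurrent
 * dequeues meanwhile.
 */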
static ssize_t
virtio_transport_stream_do_peek(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	struct sk_buff *skb;
	size_t total = 0;
	int err;

	spin_lock_bh(&vvs->rx_lock);

	skb_queue_walk(&vvs->rx_queue, skb) {
		size_t bytes;

		bytes = len - total;
		if (bytes > skb->len)
			bytes = skb->len;

		spin_unlock_bh(&vvs->rx_lock);

		/* sk_lock is held by caller so no one else can dequeue.
		 * Unlock rx_lock since skb_copy_datagram_iter() may sleep.
		 */
		err = skb_copy_datagram_iter(skb, VIRTIO_VSOCK_SKB_CB(skb)->offset,
					     &msg->msg_iter, bytes);
		if (err)
			goto out;

		total += bytes;

		spin_lock_bh(&vvs->rx_lock);

		if (total == len)
			break;
	}

	spin_unlock_bh(&vvs->rx_lock);

	return total;

out:
	if (total)
		err = total;
	return err;
}

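/* Dequeue up to 'len' bytes, freeing each skb once it is fully
 * consumed and returning its credit to the peer. A credit update is
 * sent only when the free space drops below VIRTIO_VSOCK_MAX_PKT_BUF_SIZE,
 * to limit the number of credit update messages.
 */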
static ssize_t
virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   size_t len)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	size_t bytes, total = 0;
	struct sk_buff *skb;
	int err = -EFAULT;
	u32 free_space;

	spin_lock_bh(&vvs->rx_lock);

	if (WARN_ONCE(skb_queue_empty(&vvs->rx_queue) && vvs->rx_bytes,
		      "rx_queue is empty, but rx_bytes is non-zero\n")) {
		spin_unlock_bh(&vvs->rx_lock);
		return err;
	}

	while (total < len && !skb_queue_empty(&vvs->rx_queue)) {
		skb = skb_peek(&vvs->rx_queue);

		bytes = min_t(size_t, len - total,
			      skb->len - VIRTIO_VSOCK_SKB_CB(skb)->offset);

		/* sk_lock is held by caller so no one else can dequeue.
		 * Unlock rx_lock since skb_copy_datagram_iter() may sleep.
		 */
		spin_unlock_bh(&vvs->rx_lock);

		err = skb_copy_datagram_iter(skb,
					     VIRTIO_VSOCK_SKB_CB(skb)->offset,
					     &msg->msg_iter, bytes);
		if (err)
			goto out;

		spin_lock_bh(&vvs->rx_lock);

		total += bytes;

		VIRTIO_VSOCK_SKB_CB(skb)->offset += bytes;

		if (skb->len == VIRTIO_VSOCK_SKB_CB(skb)->offset) {
			u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len);

			virtio_transport_dec_rx_pkt(vvs, pkt_len);
			__skb_unlink(skb, &vvs->rx_queue);
			consume_skb(skb);
		}
	}

	free_space = vvs->buf_alloc - (vvs->fwd_cnt - vvs->last_fwd_cnt);

	spin_unlock_bh(&vvs->rx_lock);

	/* To reduce the number of credit update messages,
	 * don't update credits as long as lots of space is available.
	 * Note: the limit chosen here is arbitrary. Setting the limit
	 * too high causes extra messages. Too low causes transmitter
	 * stalls. As stalls are in theory more expensive than extra
	 * messages, we set the limit to a high value. TODO: experiment
	 * with different values.
	 */
	if (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
		virtio_transport_send_credit_update(vsk);

	return total;

out:
	if (total)
		err = total;
	return err;
}

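/* Peek at the first queued SEQPACKET message without dequeueing it,
 * stopping at the fragment flagged with VIRTIO_VSOCK_SEQ_EOM and
 * propagating MSG_EOR to the caller when the sender set it.
 */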
static ssize_t
virtio_transport_seqpacket_do_peek(struct vsock_sock *vsk,
				   struct msghdr *msg)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	struct sk_buff *skb;
	size_t total, len;

	spin_lock_bh(&vvs->rx_lock);

	if (!vvs->msg_count) {
		spin_unlock_bh(&vvs->rx_lock);
		return 0;
	}

	total = 0;
	len = msg_data_left(msg);

	skb_queue_walk(&vvs->rx_queue, skb) {
		struct virtio_vsock_hdr *hdr;

		if (total < len) {
			size_t bytes;
			int err;

			bytes = len - total;
			if (bytes > skb->len)
				bytes = skb->len;

			spin_unlock_bh(&vvs->rx_lock);

			/* sk_lock is held by caller so no one else can dequeue.
			 * Unlock rx_lock since skb_copy_datagram_iter() may sleep.
			 */
			err = skb_copy_datagram_iter(skb, VIRTIO_VSOCK_SKB_CB(skb)->offset,
						     &msg->msg_iter, bytes);
			if (err)
				return err;

			spin_lock_bh(&vvs->rx_lock);
		}

		total += skb->len;
		hdr = virtio_vsock_hdr(skb);

		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
			if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR)
				msg->msg_flags |= MSG_EOR;

			break;
		}
	}

	spin_unlock_bh(&vvs->rx_lock);

	return total;
}

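/* Dequeue one complete SEQPACKET message, freeing fragments as they
 * are consumed. If the user buffer is too small, the remainder of the
 * message is dropped, but the returned length still counts the whole
 * message.
 */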
static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
						 struct msghdr *msg,
						 int flags)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	int dequeued_len = 0;
	size_t user_buf_len = msg_data_left(msg);
	bool msg_ready = false;
	struct sk_buff *skb;

	spin_lock_bh(&vvs->rx_lock);

	if (vvs->msg_count == 0) {
		spin_unlock_bh(&vvs->rx_lock);
		return 0;
	}

	while (!msg_ready) {
		struct virtio_vsock_hdr *hdr;
		size_t pkt_len;

		skb = __skb_dequeue(&vvs->rx_queue);
		if (!skb)
			break;
		hdr = virtio_vsock_hdr(skb);
		pkt_len = (size_t)le32_to_cpu(hdr->len);

		if (dequeued_len >= 0) {
			size_t bytes_to_copy;

			bytes_to_copy = min(user_buf_len, pkt_len);

			if (bytes_to_copy) {
				int err;

				/* sk_lock is held by caller so no one else can dequeue.
				 * Unlock rx_lock since skb_copy_datagram_iter() may sleep.
				 */
				spin_unlock_bh(&vvs->rx_lock);

				err = skb_copy_datagram_iter(skb, 0,
							     &msg->msg_iter,
							     bytes_to_copy);
				if (err) {
					/* Copying the message failed. The rest of
					 * the fragments will be freed without copying.
					 */
					dequeued_len = err;
				} else {
					user_buf_len -= bytes_to_copy;
				}

				spin_lock_bh(&vvs->rx_lock);
			}

			if (dequeued_len >= 0)
				dequeued_len += pkt_len;
		}

		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
			msg_ready = true;
			vvs->msg_count--;

			if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR)
				msg->msg_flags |= MSG_EOR;
		}

		virtio_transport_dec_rx_pkt(vvs, pkt_len);
		kfree_skb(skb);
	}

	spin_unlock_bh(&vvs->rx_lock);

	virtio_transport_send_credit_update(vsk);

	return dequeued_len;
}

ssize_t
virtio_transport_stream_dequeue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len, int flags)
{
	if (flags & MSG_PEEK)
		return virtio_transport_stream_do_peek(vsk, msg, len);
	else
		return virtio_transport_stream_do_dequeue(vsk, msg, len);
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);

ssize_t
virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   int flags)
{
	if (flags & MSG_PEEK)
		return virtio_transport_seqpacket_do_peek(vsk, msg);
	else
		return virtio_transport_seqpacket_do_dequeue(vsk, msg, flags);
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_dequeue);

AK
786int
787virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
788 struct msghdr *msg,
789 size_t len)
790{
791 struct virtio_vsock_sock *vvs = vsk->trans;
792
793 spin_lock_bh(&vvs->tx_lock);
794
795 if (len > vvs->peer_buf_alloc) {
796 spin_unlock_bh(&vvs->tx_lock);
797 return -EMSGSIZE;
798 }
799
800 spin_unlock_bh(&vvs->tx_lock);
801
802 return virtio_transport_stream_enqueue(vsk, msg, len);
803}
804EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_enqueue);
805
06a8fc78
AH
806int
807virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
808 struct msghdr *msg,
809 size_t len, int flags)
810{
811 return -EOPNOTSUPP;
812}
813EXPORT_SYMBOL_GPL(virtio_transport_dgram_dequeue);
814
s64 virtio_transport_stream_has_data(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	s64 bytes;

	spin_lock_bh(&vvs->rx_lock);
	bytes = vvs->rx_bytes;
	spin_unlock_bh(&vvs->rx_lock);

	return bytes;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_has_data);

u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	u32 msg_count;

	spin_lock_bh(&vvs->rx_lock);
	msg_count = vvs->msg_count;
	spin_unlock_bh(&vvs->rx_lock);

	return msg_count;
}
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_has_data);

static s64 virtio_transport_has_space(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	s64 bytes;

	bytes = (s64)vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
	if (bytes < 0)
		bytes = 0;

	return bytes;
}

s64 virtio_transport_stream_has_space(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	s64 bytes;

	spin_lock_bh(&vvs->tx_lock);
	bytes = virtio_transport_has_space(vsk);
	spin_unlock_bh(&vvs->tx_lock);

	return bytes;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_has_space);

int virtio_transport_do_socket_init(struct vsock_sock *vsk,
				    struct vsock_sock *psk)
{
	struct virtio_vsock_sock *vvs;

	vvs = kzalloc(sizeof(*vvs), GFP_KERNEL);
	if (!vvs)
		return -ENOMEM;

	vsk->trans = vvs;
	vvs->vsk = vsk;
	if (psk && psk->trans) {
		struct virtio_vsock_sock *ptrans = psk->trans;

		vvs->peer_buf_alloc = ptrans->peer_buf_alloc;
	}

	if (vsk->buffer_size > VIRTIO_VSOCK_MAX_BUF_SIZE)
		vsk->buffer_size = VIRTIO_VSOCK_MAX_BUF_SIZE;

	vvs->buf_alloc = vsk->buffer_size;

	spin_lock_init(&vvs->rx_lock);
	spin_lock_init(&vvs->tx_lock);
	skb_queue_head_init(&vvs->rx_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_do_socket_init);

/* sk_lock held by the caller */
void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	if (*val > VIRTIO_VSOCK_MAX_BUF_SIZE)
		*val = VIRTIO_VSOCK_MAX_BUF_SIZE;

	vvs->buf_alloc = *val;

	virtio_transport_send_credit_update(vsk);
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_buffer_size);

int
virtio_transport_notify_poll_in(struct vsock_sock *vsk,
				size_t target,
				bool *data_ready_now)
{
	*data_ready_now = vsock_stream_has_data(vsk) >= target;

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_in);

int
virtio_transport_notify_poll_out(struct vsock_sock *vsk,
				 size_t target,
				 bool *space_avail_now)
{
	s64 free_space;

	free_space = vsock_stream_has_space(vsk);
	if (free_space > 0)
		*space_avail_now = true;
	else if (free_space == 0)
		*space_avail_now = false;

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_out);

int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_init);

int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_block);

int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_dequeue);

int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
	size_t target, ssize_t copied, bool data_read,
	struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_post_dequeue);

int virtio_transport_notify_send_init(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_init);

int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_block);

int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_enqueue);

int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
	ssize_t written, struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_post_enqueue);

u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk)
{
	return vsk->buffer_size;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_rcvhiwat);

bool virtio_transport_stream_is_active(struct vsock_sock *vsk)
{
	return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_is_active);

bool virtio_transport_stream_allow(u32 cid, u32 port)
{
	return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_allow);

int virtio_transport_dgram_bind(struct vsock_sock *vsk,
				struct sockaddr_vm *addr)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_bind);

bool virtio_transport_dgram_allow(u32 cid, u32 port)
{
	return false;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_allow);

int virtio_transport_connect(struct vsock_sock *vsk)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_REQUEST,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_connect);

int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_SHUTDOWN,
		.flags = (mode & RCV_SHUTDOWN ?
			  VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
			 (mode & SEND_SHUTDOWN ?
			  VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_shutdown);

int
virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
			       struct sockaddr_vm *remote_addr,
			       struct msghdr *msg,
			       size_t dgram_len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_enqueue);

ssize_t
virtio_transport_stream_enqueue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RW,
		.msg = msg,
		.pkt_len = len,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_enqueue);

void virtio_transport_destruct(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	kfree(vvs);
}
EXPORT_SYMBOL_GPL(virtio_transport_destruct);

static int virtio_transport_reset(struct vsock_sock *vsk,
				  struct sk_buff *skb)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RST,
		.reply = !!skb,
		.vsk = vsk,
	};

	/* Send RST only if the original pkt is not a RST pkt */
	if (skb && le16_to_cpu(virtio_vsock_hdr(skb)->op) == VIRTIO_VSOCK_OP_RST)
		return 0;

	return virtio_transport_send_pkt_info(vsk, &info);
}

/* Normally packets are associated with a socket. There may be no socket if an
 * attempt was made to connect to a socket that does not exist.
 */
static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
					  struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RST,
		.type = le16_to_cpu(hdr->type),
		.reply = true,
	};
	struct sk_buff *reply;

	/* Send RST only if the original pkt is not a RST pkt */
	if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
		return 0;

	if (!t)
		return -ENOTCONN;

	reply = virtio_transport_alloc_skb(&info, 0, false,
					   le64_to_cpu(hdr->dst_cid),
					   le32_to_cpu(hdr->dst_port),
					   le64_to_cpu(hdr->src_cid),
					   le32_to_cpu(hdr->src_port));
	if (!reply)
		return -ENOMEM;

	return t->send_pkt(reply);
}

/* This function should be called with sk_lock held and SOCK_DONE set */
static void virtio_transport_remove_sock(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	/* We don't need to take rx_lock, as the socket is closing and we are
	 * removing it.
	 */
	__skb_queue_purge(&vvs->rx_queue);
	vsock_remove_sock(vsk);
}

static void virtio_transport_wait_close(struct sock *sk, long timeout)
{
	if (timeout) {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);

		add_wait_queue(sk_sleep(sk), &wait);

		do {
			if (sk_wait_event(sk, &timeout,
					  sock_flag(sk, SOCK_DONE), &wait))
				break;
		} while (!signal_pending(current) && timeout);

		remove_wait_queue(sk_sleep(sk), &wait);
	}
}

static void virtio_transport_do_close(struct vsock_sock *vsk,
				      bool cancel_timeout)
{
	struct sock *sk = sk_vsock(vsk);

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	if (vsock_stream_has_data(vsk) <= 0)
		sk->sk_state = TCP_CLOSING;
	sk->sk_state_change(sk);

	if (vsk->close_work_scheduled &&
	    (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
		vsk->close_work_scheduled = false;

		virtio_transport_remove_sock(vsk);

		/* Release refcnt obtained when we scheduled the timeout */
		sock_put(sk);
	}
}

static void virtio_transport_close_timeout(struct work_struct *work)
{
	struct vsock_sock *vsk =
		container_of(work, struct vsock_sock, close_work.work);
	struct sock *sk = sk_vsock(vsk);

	sock_hold(sk);
	lock_sock(sk);

	if (!sock_flag(sk, SOCK_DONE)) {
		(void)virtio_transport_reset(vsk, NULL);

		virtio_transport_do_close(vsk, false);
	}

	vsk->close_work_scheduled = false;

	release_sock(sk);
	sock_put(sk);
}

/* User context, vsk->sk is locked */
static bool virtio_transport_close(struct vsock_sock *vsk)
{
	struct sock *sk = &vsk->sk;

	if (!(sk->sk_state == TCP_ESTABLISHED ||
	      sk->sk_state == TCP_CLOSING))
		return true;

	/* Already received SHUTDOWN from peer, reply with RST */
	if ((vsk->peer_shutdown & SHUTDOWN_MASK) == SHUTDOWN_MASK) {
		(void)virtio_transport_reset(vsk, NULL);
		return true;
	}

	if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
		(void)virtio_transport_shutdown(vsk, SHUTDOWN_MASK);

	if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING))
		virtio_transport_wait_close(sk, sk->sk_lingertime);

	if (sock_flag(sk, SOCK_DONE))
		return true;

	sock_hold(sk);
	INIT_DELAYED_WORK(&vsk->close_work,
			  virtio_transport_close_timeout);
	vsk->close_work_scheduled = true;
	schedule_delayed_work(&vsk->close_work, VSOCK_CLOSE_TIMEOUT);
	return false;
}

void virtio_transport_release(struct vsock_sock *vsk)
{
	struct sock *sk = &vsk->sk;
	bool remove_sock = true;

	if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)
		remove_sock = virtio_transport_close(vsk);

	if (remove_sock) {
		sock_set_flag(sk, SOCK_DONE);
		virtio_transport_remove_sock(vsk);
	}
}
EXPORT_SYMBOL_GPL(virtio_transport_release);

static int
virtio_transport_recv_connecting(struct sock *sk,
				 struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);
	int skerr;
	int err;

	switch (le16_to_cpu(hdr->op)) {
	case VIRTIO_VSOCK_OP_RESPONSE:
		sk->sk_state = TCP_ESTABLISHED;
		sk->sk_socket->state = SS_CONNECTED;
		vsock_insert_connected(vsk);
		sk->sk_state_change(sk);
		break;
	case VIRTIO_VSOCK_OP_INVALID:
		break;
	case VIRTIO_VSOCK_OP_RST:
		skerr = ECONNRESET;
		err = 0;
		goto destroy;
	default:
		skerr = EPROTO;
		err = -EINVAL;
		goto destroy;
	}
	return 0;

destroy:
	virtio_transport_reset(vsk, skb);
	sk->sk_state = TCP_CLOSE;
	sk->sk_err = skerr;
	sk_error_report(sk);
	return err;
}

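/* Queue a received packet on the socket's rx_queue, charging it to the
 * receive buffer. Small packets may be coalesced into the tail skb to
 * avoid wasting a whole buffer on a short payload.
 */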
static void
virtio_transport_recv_enqueue(struct vsock_sock *vsk,
			      struct sk_buff *skb)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	bool can_enqueue, free_pkt = false;
	struct virtio_vsock_hdr *hdr;
	u32 len;

	hdr = virtio_vsock_hdr(skb);
	len = le32_to_cpu(hdr->len);

	spin_lock_bh(&vvs->rx_lock);

	can_enqueue = virtio_transport_inc_rx_pkt(vvs, len);
	if (!can_enqueue) {
		free_pkt = true;
		goto out;
	}

	if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)
		vvs->msg_count++;

	/* Try to copy small packets into the buffer of the last packet
	 * queued, to avoid wasting memory queueing the entire buffer
	 * with a small payload.
	 */
	if (len <= GOOD_COPY_LEN && !skb_queue_empty(&vvs->rx_queue)) {
		struct virtio_vsock_hdr *last_hdr;
		struct sk_buff *last_skb;

		last_skb = skb_peek_tail(&vvs->rx_queue);
		last_hdr = virtio_vsock_hdr(last_skb);

		/* If there is space in the last packet queued, we copy the
		 * new packet in its buffer. We avoid this if the last packet
		 * queued has VIRTIO_VSOCK_SEQ_EOM set, because this is the
		 * delimiter of a SEQPACKET message, so 'skb' is the first
		 * packet of a new message.
		 */
		if (skb->len < skb_tailroom(last_skb) &&
		    !(le32_to_cpu(last_hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)) {
			memcpy(skb_put(last_skb, skb->len), skb->data, skb->len);
			free_pkt = true;
			last_hdr->flags |= hdr->flags;
			le32_add_cpu(&last_hdr->len, len);
			goto out;
		}
	}

	__skb_queue_tail(&vvs->rx_queue, skb);

out:
	spin_unlock_bh(&vvs->rx_lock);
	if (free_pkt)
		kfree_skb(skb);
}

static int
virtio_transport_recv_connected(struct sock *sk,
				struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);
	int err = 0;

	switch (le16_to_cpu(hdr->op)) {
	case VIRTIO_VSOCK_OP_RW:
		virtio_transport_recv_enqueue(vsk, skb);
		vsock_data_ready(sk);
		return err;
	case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
		virtio_transport_send_credit_update(vsk);
		break;
	case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
		sk->sk_write_space(sk);
		break;
	case VIRTIO_VSOCK_OP_SHUTDOWN:
		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
			vsk->peer_shutdown |= RCV_SHUTDOWN;
		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
			vsk->peer_shutdown |= SEND_SHUTDOWN;
		if (vsk->peer_shutdown == SHUTDOWN_MASK) {
			if (vsock_stream_has_data(vsk) <= 0 && !sock_flag(sk, SOCK_DONE)) {
				(void)virtio_transport_reset(vsk, NULL);
				virtio_transport_do_close(vsk, true);
			}
			/* Remove this socket anyway because the remote peer sent
			 * the shutdown. This way a new connection will succeed
			 * if the remote peer uses the same source port,
			 * even if the old socket is still unreleased, but now disconnected.
			 */
			vsock_remove_sock(vsk);
		}
		if (le32_to_cpu(virtio_vsock_hdr(skb)->flags))
			sk->sk_state_change(sk);
		break;
	case VIRTIO_VSOCK_OP_RST:
		virtio_transport_do_close(vsk, true);
		break;
	default:
		err = -EINVAL;
		break;
	}

	kfree_skb(skb);
	return err;
}

static void
virtio_transport_recv_disconnecting(struct sock *sk,
				    struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);

	if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
		virtio_transport_do_close(vsk, true);
}

static int
virtio_transport_send_response(struct vsock_sock *vsk,
			       struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RESPONSE,
		.remote_cid = le64_to_cpu(hdr->src_cid),
		.remote_port = le32_to_cpu(hdr->src_port),
		.reply = true,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}

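/* Refresh the peer's credit state (peer_buf_alloc, peer_fwd_cnt) from
 * a received header and report whether there is now space to send.
 */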
static bool virtio_transport_space_update(struct sock *sk,
					  struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);
	struct virtio_vsock_sock *vvs = vsk->trans;
	bool space_available;

	/* Listener sockets are not associated with any transport, so we are
	 * not able to take the state to see if there is space available in the
	 * remote peer, but since they are only used to receive requests, we
	 * can assume that there is always space available in the other peer.
	 */
	if (!vvs)
		return true;

	/* buf_alloc and fwd_cnt are always included in the hdr */
	spin_lock_bh(&vvs->tx_lock);
	vvs->peer_buf_alloc = le32_to_cpu(hdr->buf_alloc);
	vvs->peer_fwd_cnt = le32_to_cpu(hdr->fwd_cnt);
	space_available = virtio_transport_has_space(vsk);
	spin_unlock_bh(&vvs->tx_lock);
	return space_available;
}

/* Handle server socket */
static int
virtio_transport_recv_listen(struct sock *sk, struct sk_buff *skb,
			     struct virtio_transport *t)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vsock_sock *vsk = vsock_sk(sk);
	struct vsock_sock *vchild;
	struct sock *child;
	int ret;

	if (le16_to_cpu(hdr->op) != VIRTIO_VSOCK_OP_REQUEST) {
		virtio_transport_reset_no_sock(t, skb);
		return -EINVAL;
	}

	if (sk_acceptq_is_full(sk)) {
		virtio_transport_reset_no_sock(t, skb);
		return -ENOMEM;
	}

	child = vsock_create_connected(sk);
	if (!child) {
		virtio_transport_reset_no_sock(t, skb);
		return -ENOMEM;
	}

	sk_acceptq_added(sk);

	lock_sock_nested(child, SINGLE_DEPTH_NESTING);

	child->sk_state = TCP_ESTABLISHED;

	vchild = vsock_sk(child);
	vsock_addr_init(&vchild->local_addr, le64_to_cpu(hdr->dst_cid),
			le32_to_cpu(hdr->dst_port));
	vsock_addr_init(&vchild->remote_addr, le64_to_cpu(hdr->src_cid),
			le32_to_cpu(hdr->src_port));

	ret = vsock_assign_transport(vchild, vsk);
	/* The transport assigned (based on remote_addr) must be the same
	 * one on which we received the request.
	 */
	if (ret || vchild->transport != &t->transport) {
		release_sock(child);
		virtio_transport_reset_no_sock(t, skb);
		sock_put(child);
		return ret;
	}

	if (virtio_transport_space_update(child, skb))
		child->sk_write_space(child);

	vsock_insert_connected(vchild);
	vsock_enqueue_accept(sk, child);
	virtio_transport_send_response(vchild, skb);

	release_sock(child);

	sk->sk_data_ready(sk);
	return 0;
}

static bool virtio_transport_valid_type(u16 type)
{
	return (type == VIRTIO_VSOCK_TYPE_STREAM) ||
	       (type == VIRTIO_VSOCK_TYPE_SEQPACKET);
}

/* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
 * lock.
 */
void virtio_transport_recv_pkt(struct virtio_transport *t,
			       struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct sockaddr_vm src, dst;
	struct vsock_sock *vsk;
	struct sock *sk;
	bool space_available;

	vsock_addr_init(&src, le64_to_cpu(hdr->src_cid),
			le32_to_cpu(hdr->src_port));
	vsock_addr_init(&dst, le64_to_cpu(hdr->dst_cid),
			le32_to_cpu(hdr->dst_port));

	trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
					dst.svm_cid, dst.svm_port,
					le32_to_cpu(hdr->len),
					le16_to_cpu(hdr->type),
					le16_to_cpu(hdr->op),
					le32_to_cpu(hdr->flags),
					le32_to_cpu(hdr->buf_alloc),
					le32_to_cpu(hdr->fwd_cnt));

	if (!virtio_transport_valid_type(le16_to_cpu(hdr->type))) {
		(void)virtio_transport_reset_no_sock(t, skb);
		goto free_pkt;
	}

	/* The socket must be in the connected or bound table,
	 * otherwise send a reset back.
	 */
	sk = vsock_find_connected_socket(&src, &dst);
	if (!sk) {
		sk = vsock_find_bound_socket(&dst);
		if (!sk) {
			(void)virtio_transport_reset_no_sock(t, skb);
			goto free_pkt;
		}
	}

	if (virtio_transport_get_type(sk) != le16_to_cpu(hdr->type)) {
		(void)virtio_transport_reset_no_sock(t, skb);
		sock_put(sk);
		goto free_pkt;
	}

	if (!skb_set_owner_sk_safe(skb, sk)) {
		WARN_ONCE(1, "receiving vsock socket has sk_refcnt == 0\n");
		goto free_pkt;
	}

	vsk = vsock_sk(sk);

	lock_sock(sk);

	/* Check if sk has been closed before lock_sock */
	if (sock_flag(sk, SOCK_DONE)) {
		(void)virtio_transport_reset_no_sock(t, skb);
		release_sock(sk);
		sock_put(sk);
		goto free_pkt;
	}

	space_available = virtio_transport_space_update(sk, skb);

	/* Update CID in case it has changed after a transport reset event */
	if (vsk->local_addr.svm_cid != VMADDR_CID_ANY)
		vsk->local_addr.svm_cid = dst.svm_cid;

	if (space_available)
		sk->sk_write_space(sk);

	switch (sk->sk_state) {
	case TCP_LISTEN:
		virtio_transport_recv_listen(sk, skb, t);
		kfree_skb(skb);
		break;
	case TCP_SYN_SENT:
		virtio_transport_recv_connecting(sk, skb);
		kfree_skb(skb);
		break;
	case TCP_ESTABLISHED:
		virtio_transport_recv_connected(sk, skb);
		break;
	case TCP_CLOSING:
		virtio_transport_recv_disconnecting(sk, skb);
		kfree_skb(skb);
		break;
	default:
		(void)virtio_transport_reset_no_sock(t, skb);
		kfree_skb(skb);
		break;
	}

	release_sock(sk);

	/* Release refcnt obtained when we fetched this socket out of the
	 * bound or connected list.
	 */
	sock_put(sk);
	return;

free_pkt:
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);

/* Remove skbs found in a queue that have a vsk that matches.
 *
 * Each skb is freed.
 *
 * Returns the count of skbs that were reply packets.
 */
int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *queue)
{
	struct sk_buff_head freeme;
	struct sk_buff *skb, *tmp;
	int cnt = 0;

	skb_queue_head_init(&freeme);

	spin_lock_bh(&queue->lock);
	skb_queue_walk_safe(queue, skb, tmp) {
		if (vsock_sk(skb->sk) != vsk)
			continue;

		__skb_unlink(skb, queue);
		__skb_queue_tail(&freeme, skb);

		if (virtio_vsock_skb_reply(skb))
			cnt++;
	}
	spin_unlock_bh(&queue->lock);

	__skb_queue_purge(&freeme);

	return cnt;
}
EXPORT_SYMBOL_GPL(virtio_transport_purge_skbs);

int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_actor)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	struct sock *sk = sk_vsock(vsk);
	struct sk_buff *skb;
	int off = 0;
	int err;

	spin_lock_bh(&vvs->rx_lock);
	/* Use __skb_recv_datagram() for race-free handling of the receive. It
	 * works for types other than dgrams.
	 */
	skb = __skb_recv_datagram(sk, &vvs->rx_queue, MSG_DONTWAIT, &off, &err);
	spin_unlock_bh(&vvs->rx_lock);

	if (!skb)
		return err;

	return recv_actor(sk, skb);
}
EXPORT_SYMBOL_GPL(virtio_transport_read_skb);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("common code for virtio vsock");