/*
 * QEMU TX packets abstractions
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Tamir Shomer <tamirs@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "net_tx_pkt.h"
#include "net/eth.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "net/net.h"
#include "hw/pci/pci_device.h"

enum {
    NET_TX_PKT_VHDR_FRAG = 0,
    NET_TX_PKT_L2HDR_FRAG,
    NET_TX_PKT_L3HDR_FRAG,
    NET_TX_PKT_PL_START_FRAG
};
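
/*
 * Note (added commentary): pkt->vec is laid out as
 *
 *   vec[NET_TX_PKT_VHDR_FRAG]      virtio_net_hdr
 *   vec[NET_TX_PKT_L2HDR_FRAG]     Ethernet (+ VLAN) header
 *   vec[NET_TX_PKT_L3HDR_FRAG]     IPv4/IPv6 header
 *   vec[NET_TX_PKT_PL_START_FRAG]  first payload fragment
 *
 * so a transmit can start at VHDR_FRAG when the peer consumes a virtio
 * header, or at L2HDR_FRAG when it does not.
 */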

/* TX packet private context */
struct NetTxPkt {
    PCIDevice *pci_dev;

    struct virtio_net_hdr virt_hdr;

    struct iovec *raw;
    uint32_t raw_frags;
    uint32_t max_raw_frags;

    struct iovec *vec;

    uint8_t l2_hdr[ETH_MAX_L2_HDR_LEN];
    uint8_t l3_hdr[ETH_MAX_IP_DGRAM_LEN];

    uint32_t payload_len;

    uint32_t payload_frags;
    uint32_t max_payload_frags;

    uint16_t hdr_len;
    eth_pkt_types_e packet_type;
    uint8_t l4proto;
};

void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
    uint32_t max_frags)
{
    struct NetTxPkt *p = g_malloc0(sizeof *p);

    p->pci_dev = pci_dev;

    p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);

    p->raw = g_new(struct iovec, max_frags);

    p->max_payload_frags = max_frags;
    p->max_raw_frags = max_frags;
    p->vec[NET_TX_PKT_VHDR_FRAG].iov_base = &p->virt_hdr;
    p->vec[NET_TX_PKT_VHDR_FRAG].iov_len = sizeof p->virt_hdr;
    p->vec[NET_TX_PKT_L2HDR_FRAG].iov_base = &p->l2_hdr;
    p->vec[NET_TX_PKT_L3HDR_FRAG].iov_base = &p->l3_hdr;

    *pkt = p;
}

void net_tx_pkt_uninit(struct NetTxPkt *pkt)
{
    if (pkt) {
        g_free(pkt->vec);
        g_free(pkt->raw);
        g_free(pkt);
    }
}
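
/*
 * Typical lifecycle (illustrative sketch only, not part of the original
 * file): a device model owning 'pci_dev' would drive this API roughly as
 * below; 'desc_addr', 'desc_len' and 'nc' stand in for device-specific
 * descriptor fields and the device's NetClientState.
 *
 *     struct NetTxPkt *pkt;
 *
 *     net_tx_pkt_init(&pkt, pci_dev, max_frags);
 *     net_tx_pkt_add_raw_fragment(pkt, desc_addr, desc_len);
 *     if (net_tx_pkt_parse(pkt)) {
 *         net_tx_pkt_send(pkt, nc);
 *     }
 *     net_tx_pkt_reset(pkt);      // also unmaps the DMA-mapped fragments
 *     ...
 *     net_tx_pkt_uninit(pkt);
 */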

void net_tx_pkt_update_ip_hdr_checksum(struct NetTxPkt *pkt)
{
    uint16_t csum;
    struct ip_header *ip_hdr;

    assert(pkt);

    ip_hdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;

    ip_hdr->ip_len = cpu_to_be16(pkt->payload_len +
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len);

    ip_hdr->ip_sum = 0;
    csum = net_raw_checksum((uint8_t *)ip_hdr,
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len);
    ip_hdr->ip_sum = cpu_to_be16(csum);
}
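
/*
 * Note (added commentary): the IPv4 header checksum is the 16-bit one's
 * complement of the one's complement sum of all 16-bit words in the
 * header (RFC 1071), computed with ip_sum zeroed first. For example, if
 * the folded sum of a 20-byte header comes out as 0xB861, the stored
 * checksum is ~0xB861 == 0x479E.
 */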

void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt)
{
    uint16_t csum;
    uint32_t cntr, cso;
    uint8_t gso_type;
    void *ip_hdr;

    assert(pkt);

    gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN;
    ip_hdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;

    if (pkt->payload_len + pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len >
        ETH_MAX_IP_DGRAM_LEN) {
        return;
    }

    if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4 ||
        gso_type == VIRTIO_NET_HDR_GSO_UDP) {
        /* Calculate IP header checksum */
        net_tx_pkt_update_ip_hdr_checksum(pkt);

        /* Calculate IP pseudo header checksum */
        cntr = eth_calc_ip4_pseudo_hdr_csum(ip_hdr, pkt->payload_len, &cso);
        csum = cpu_to_be16(~net_checksum_finish(cntr));
    } else if (gso_type == VIRTIO_NET_HDR_GSO_TCPV6) {
        /* Calculate IP pseudo header checksum */
        cntr = eth_calc_ip6_pseudo_hdr_csum(ip_hdr, pkt->payload_len,
                                            IP_PROTO_TCP, &cso);
        csum = cpu_to_be16(~net_checksum_finish(cntr));
    } else {
        return;
    }

    iov_from_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], pkt->payload_frags,
                 pkt->virt_hdr.csum_offset, &csum, sizeof(csum));
}
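
/*
 * Note (added commentary): for GSO packets the L4 checksum field is only
 * seeded with the pseudo-header checksum here, following the virtio-net
 * offload convention: with VIRTIO_NET_HDR_F_NEEDS_CSUM set, the consumer
 * finishes the checksum over the data starting at csum_start and stores
 * it at csum_start + csum_offset, which is why no payload is summed here.
 */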

static void net_tx_pkt_calculate_hdr_len(struct NetTxPkt *pkt)
{
    pkt->hdr_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len +
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len;
}

static bool net_tx_pkt_parse_headers(struct NetTxPkt *pkt)
{
    struct iovec *l2_hdr, *l3_hdr;
    size_t bytes_read;
    size_t full_ip6hdr_len;
    uint16_t l3_proto;

    assert(pkt);

    l2_hdr = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
    l3_hdr = &pkt->vec[NET_TX_PKT_L3HDR_FRAG];

    bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, 0, l2_hdr->iov_base,
                            ETH_MAX_L2_HDR_LEN);
    if (bytes_read < sizeof(struct eth_header)) {
        l2_hdr->iov_len = 0;
        return false;
    }

    l2_hdr->iov_len = sizeof(struct eth_header);
    switch (be16_to_cpu(PKT_GET_ETH_HDR(l2_hdr->iov_base)->h_proto)) {
    case ETH_P_VLAN:
        l2_hdr->iov_len += sizeof(struct vlan_header);
        break;
    case ETH_P_DVLAN:
        l2_hdr->iov_len += 2 * sizeof(struct vlan_header);
        break;
    }

    if (bytes_read < l2_hdr->iov_len) {
        l2_hdr->iov_len = 0;
        l3_hdr->iov_len = 0;
        pkt->packet_type = ETH_PKT_UCAST;
        return false;
    } else {
        l2_hdr->iov_len = ETH_MAX_L2_HDR_LEN;
        l2_hdr->iov_len = eth_get_l2_hdr_length(l2_hdr->iov_base);
        pkt->packet_type = get_eth_packet_type(l2_hdr->iov_base);
    }

    l3_proto = eth_get_l3_proto(l2_hdr, 1, l2_hdr->iov_len);

    switch (l3_proto) {
    case ETH_P_IP:
        bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                l3_hdr->iov_base, sizeof(struct ip_header));

        if (bytes_read < sizeof(struct ip_header)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        l3_hdr->iov_len = IP_HDR_GET_LEN(l3_hdr->iov_base);

        if (l3_hdr->iov_len < sizeof(struct ip_header)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        pkt->l4proto = IP_HDR_GET_P(l3_hdr->iov_base);

        if (IP_HDR_GET_LEN(l3_hdr->iov_base) != sizeof(struct ip_header)) {
            /* copy optional IPv4 header data, if any */
            bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags,
                                    l2_hdr->iov_len + sizeof(struct ip_header),
                                    l3_hdr->iov_base + sizeof(struct ip_header),
                                    l3_hdr->iov_len - sizeof(struct ip_header));
            if (bytes_read < l3_hdr->iov_len - sizeof(struct ip_header)) {
                l3_hdr->iov_len = 0;
                return false;
            }
        }

        break;

    case ETH_P_IPV6:
    {
        eth_ip6_hdr_info hdrinfo;

        if (!eth_parse_ipv6_hdr(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                &hdrinfo)) {
            l3_hdr->iov_len = 0;
            return false;
        }

        pkt->l4proto = hdrinfo.l4proto;
        full_ip6hdr_len = hdrinfo.full_hdr_len;

        if (full_ip6hdr_len > ETH_MAX_IP_DGRAM_LEN) {
            l3_hdr->iov_len = 0;
            return false;
        }

        bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
                                l3_hdr->iov_base, full_ip6hdr_len);

        if (bytes_read < full_ip6hdr_len) {
            l3_hdr->iov_len = 0;
            return false;
        } else {
            l3_hdr->iov_len = full_ip6hdr_len;
        }
        break;
    }
    default:
        l3_hdr->iov_len = 0;
        break;
    }

    net_tx_pkt_calculate_hdr_len(pkt);
    return true;
}
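
/*
 * Example (added commentary, illustrative numbers): for a VLAN-tagged
 * IPv4/TCP frame, parsing leaves l2_hdr->iov_len == 18 (14-byte Ethernet
 * header + 4-byte VLAN tag), l3_hdr->iov_len == 20 for an option-less
 * IPv4 header, l4proto == IP_PROTO_TCP, and hdr_len == 38.
 */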

static void net_tx_pkt_rebuild_payload(struct NetTxPkt *pkt)
{
    pkt->payload_len = iov_size(pkt->raw, pkt->raw_frags) - pkt->hdr_len;
    pkt->payload_frags = iov_copy(&pkt->vec[NET_TX_PKT_PL_START_FRAG],
                                  pkt->max_payload_frags,
                                  pkt->raw, pkt->raw_frags,
                                  pkt->hdr_len, pkt->payload_len);
}

bool net_tx_pkt_parse(struct NetTxPkt *pkt)
{
    if (net_tx_pkt_parse_headers(pkt)) {
        net_tx_pkt_rebuild_payload(pkt);
        return true;
    } else {
        return false;
    }
}

struct virtio_net_hdr *net_tx_pkt_get_vhdr(struct NetTxPkt *pkt)
{
    assert(pkt);
    return &pkt->virt_hdr;
}

static uint8_t net_tx_pkt_get_gso_type(struct NetTxPkt *pkt,
                                       bool tso_enable)
{
    uint8_t rc = VIRTIO_NET_HDR_GSO_NONE;
    uint16_t l3_proto;

    l3_proto = eth_get_l3_proto(&pkt->vec[NET_TX_PKT_L2HDR_FRAG], 1,
        pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len);

    if (!tso_enable) {
        goto func_exit;
    }

    rc = eth_get_gso_type(l3_proto, pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base,
                          pkt->l4proto);

func_exit:
    return rc;
}

bool net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable,
    bool csum_enable, uint32_t gso_size)
{
    struct tcp_hdr l4hdr;
    size_t bytes_read;

    assert(pkt);

    /* csum has to be enabled if tso is */
    assert(csum_enable || !tso_enable);

    pkt->virt_hdr.gso_type = net_tx_pkt_get_gso_type(pkt, tso_enable);

    switch (pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
    case VIRTIO_NET_HDR_GSO_NONE:
        pkt->virt_hdr.hdr_len = 0;
        pkt->virt_hdr.gso_size = 0;
        break;

    case VIRTIO_NET_HDR_GSO_UDP:
        pkt->virt_hdr.gso_size = gso_size;
        pkt->virt_hdr.hdr_len = pkt->hdr_len + sizeof(struct udp_header);
        break;

    case VIRTIO_NET_HDR_GSO_TCPV4:
    case VIRTIO_NET_HDR_GSO_TCPV6:
        bytes_read = iov_to_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG],
                                pkt->payload_frags, 0, &l4hdr, sizeof(l4hdr));
        if (bytes_read < sizeof(l4hdr) ||
            l4hdr.th_off * sizeof(uint32_t) < sizeof(l4hdr)) {
            return false;
        }

        pkt->virt_hdr.hdr_len = pkt->hdr_len + l4hdr.th_off * sizeof(uint32_t);
        pkt->virt_hdr.gso_size = gso_size;
        break;

    default:
        g_assert_not_reached();
    }

    if (csum_enable) {
        switch (pkt->l4proto) {
        case IP_PROTO_TCP:
            pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
            pkt->virt_hdr.csum_start = pkt->hdr_len;
            pkt->virt_hdr.csum_offset = offsetof(struct tcp_hdr, th_sum);
            break;
        case IP_PROTO_UDP:
            pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
            pkt->virt_hdr.csum_start = pkt->hdr_len;
            pkt->virt_hdr.csum_offset = offsetof(struct udp_hdr, uh_sum);
            break;
        default:
            break;
        }
    }

    return true;
}
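
/*
 * Example (added commentary, illustrative values): for a TCPv4 packet
 * with a 14-byte Ethernet header, a 20-byte IPv4 header and a 20-byte
 * TCP header, net_tx_pkt_build_vheader(pkt, true, true, 1460) yields
 *
 *   gso_type    = VIRTIO_NET_HDR_GSO_TCPV4
 *   hdr_len     = 54   (14 + 20 + 20)
 *   gso_size    = 1460
 *   flags       = VIRTIO_NET_HDR_F_NEEDS_CSUM
 *   csum_start  = 34   (14 + 20)
 *   csum_offset = 16   (offsetof(struct tcp_hdr, th_sum))
 */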

void net_tx_pkt_setup_vlan_header_ex(struct NetTxPkt *pkt,
    uint16_t vlan, uint16_t vlan_ethtype)
{
    bool is_new;

    assert(pkt);

    eth_setup_vlan_headers_ex(pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base,
        vlan, vlan_ethtype, &is_new);

    /* update l2hdrlen */
    if (is_new) {
        pkt->hdr_len += sizeof(struct vlan_header);
        pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len +=
            sizeof(struct vlan_header);
    }
}

bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa,
    size_t len)
{
    hwaddr mapped_len = 0;
    struct iovec *ventry;

    assert(pkt);

    if (pkt->raw_frags >= pkt->max_raw_frags) {
        return false;
    }

    if (!len) {
        return true;
    }

    ventry = &pkt->raw[pkt->raw_frags];
    mapped_len = len;

    ventry->iov_base = pci_dma_map(pkt->pci_dev, pa,
                                   &mapped_len, DMA_DIRECTION_TO_DEVICE);

    if ((ventry->iov_base != NULL) && (len == mapped_len)) {
        ventry->iov_len = mapped_len;
        pkt->raw_frags++;
        return true;
    } else {
        return false;
    }
}
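
/*
 * Note (added commentary): pci_dma_map() may map less than the requested
 * length when the guest-physical range crosses a memory region boundary;
 * the (len == mapped_len) check above rejects such partial mappings
 * instead of splitting them (and the failure path does not unmap a
 * partially mapped buffer). Successful mappings stay live until
 * net_tx_pkt_reset() releases them with pci_dma_unmap().
 */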

bool net_tx_pkt_has_fragments(struct NetTxPkt *pkt)
{
    return pkt->raw_frags > 0;
}

eth_pkt_types_e net_tx_pkt_get_packet_type(struct NetTxPkt *pkt)
{
    assert(pkt);

    return pkt->packet_type;
}

size_t net_tx_pkt_get_total_len(struct NetTxPkt *pkt)
{
    assert(pkt);

    return pkt->hdr_len + pkt->payload_len;
}

void net_tx_pkt_dump(struct NetTxPkt *pkt)
{
#ifdef NET_TX_PKT_DEBUG
    assert(pkt);

    printf("TX PKT: hdr_len: %d, pkt_type: 0x%X, l2hdr_len: %lu, "
        "l3hdr_len: %lu, payload_len: %u\n", pkt->hdr_len, pkt->packet_type,
        pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len,
        pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len, pkt->payload_len);
#endif
}

void net_tx_pkt_reset(struct NetTxPkt *pkt)
{
    int i;

    /* no assert, as reset can be called before tx_pkt_init */
    if (!pkt) {
        return;
    }

    memset(&pkt->virt_hdr, 0, sizeof(pkt->virt_hdr));

    assert(pkt->vec);

    pkt->payload_len = 0;
    pkt->payload_frags = 0;

    if (pkt->max_raw_frags > 0) {
        assert(pkt->raw);
        for (i = 0; i < pkt->raw_frags; i++) {
            assert(pkt->raw[i].iov_base);
            pci_dma_unmap(pkt->pci_dev, pkt->raw[i].iov_base,
                          pkt->raw[i].iov_len, DMA_DIRECTION_TO_DEVICE, 0);
        }
    }
    pkt->raw_frags = 0;

    pkt->hdr_len = 0;
    pkt->l4proto = 0;
}

/* 'iov'/'iov_len' describe the fragments without the vhdr fragment */
static void net_tx_pkt_do_sw_csum(struct NetTxPkt *pkt,
                                  struct iovec *iov, uint32_t iov_len,
                                  uint16_t csl)
{
    uint32_t csum_cntr;
    uint16_t csum = 0;
    uint32_t cso;
    size_t csum_offset = pkt->virt_hdr.csum_start + pkt->virt_hdr.csum_offset;
    uint16_t l3_proto = eth_get_l3_proto(iov, 1, iov->iov_len);

    /* Put zero to checksum field */
    iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);

    /* Calculate L4 TCP/UDP checksum */
    csum_cntr = 0;
    cso = 0;
    /* add pseudo header to csum */
    if (l3_proto == ETH_P_IP) {
        csum_cntr = eth_calc_ip4_pseudo_hdr_csum(
            pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base,
            csl, &cso);
    } else if (l3_proto == ETH_P_IPV6) {
        csum_cntr = eth_calc_ip6_pseudo_hdr_csum(
            pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base,
            csl, pkt->l4proto, &cso);
    }

    /* data checksum */
    csum_cntr +=
        net_checksum_add_iov(iov, iov_len, pkt->virt_hdr.csum_start, csl, cso);

    /* Put the checksum obtained into the packet */
    csum = cpu_to_be16(net_checksum_finish_nozero(csum_cntr));
    iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);
}
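
/*
 * Note (added commentary): net_checksum_finish_nozero() folds the 32-bit
 * accumulator into 16 bits and returns 0xFFFF instead of 0. That matters
 * for UDP, where a transmitted checksum of 0 means "no checksum was
 * computed" (RFC 768), so a genuine zero result must go out as 0xFFFF.
 */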

#define NET_MAX_FRAG_SG_LIST (64)

static size_t net_tx_pkt_fetch_fragment(struct NetTxPkt *pkt,
    int *src_idx, size_t *src_offset, size_t src_len,
    struct iovec *dst, int *dst_idx)
{
    size_t fetched = 0;
    struct iovec *src = pkt->vec;

    while (fetched < src_len) {

        /* no more room in the fragment iov */
        if (*dst_idx == NET_MAX_FRAG_SG_LIST) {
            break;
        }

        /* no more data in iovec */
        if (*src_idx == (pkt->payload_frags + NET_TX_PKT_PL_START_FRAG)) {
            break;
        }

        dst[*dst_idx].iov_base = src[*src_idx].iov_base + *src_offset;
        dst[*dst_idx].iov_len = MIN(src[*src_idx].iov_len - *src_offset,
            src_len - fetched);

        *src_offset += dst[*dst_idx].iov_len;
        fetched += dst[*dst_idx].iov_len;

        if (*src_offset == src[*src_idx].iov_len) {
            *src_offset = 0;
            (*src_idx)++;
        }

        (*dst_idx)++;
    }

    return fetched;
}
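
/*
 * Example (added commentary, illustrative numbers): with src_len == 1460
 * and payload iovecs of 1000 and 2000 bytes, the first call emits two dst
 * entries (1000 bytes, then 460 bytes) and leaves *src_offset == 460 in
 * the second iovec; the next call resumes from there.
 */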

static void net_tx_pkt_sendv(
    void *opaque, const struct iovec *iov, int iov_cnt,
    const struct iovec *virt_iov, int virt_iov_cnt)
{
    NetClientState *nc = opaque;

    if (qemu_get_using_vnet_hdr(nc->peer)) {
        qemu_sendv_packet(nc, virt_iov, virt_iov_cnt);
    } else {
        qemu_sendv_packet(nc, iov, iov_cnt);
    }
}

static bool net_tx_pkt_tcp_fragment_init(struct NetTxPkt *pkt,
                                         struct iovec *fragment,
                                         int *pl_idx,
                                         size_t *l4hdr_len,
                                         int *src_idx,
                                         size_t *src_offset,
                                         size_t *src_len)
{
    struct iovec *l4 = fragment + NET_TX_PKT_PL_START_FRAG;
    size_t bytes_read = 0;
    struct tcp_hdr *th;

    if (!pkt->payload_frags) {
        return false;
    }

    l4->iov_len = pkt->virt_hdr.hdr_len - pkt->hdr_len;
    l4->iov_base = g_malloc(l4->iov_len);

    *src_idx = NET_TX_PKT_PL_START_FRAG;
    while (pkt->vec[*src_idx].iov_len < l4->iov_len - bytes_read) {
        memcpy((char *)l4->iov_base + bytes_read, pkt->vec[*src_idx].iov_base,
               pkt->vec[*src_idx].iov_len);

        bytes_read += pkt->vec[*src_idx].iov_len;

        (*src_idx)++;
        if (*src_idx >= pkt->payload_frags + NET_TX_PKT_PL_START_FRAG) {
            g_free(l4->iov_base);
            return false;
        }
    }

    *src_offset = l4->iov_len - bytes_read;
    memcpy((char *)l4->iov_base + bytes_read, pkt->vec[*src_idx].iov_base,
           *src_offset);

    th = l4->iov_base;
    th->th_flags &= ~(TH_FIN | TH_PUSH);

    *pl_idx = NET_TX_PKT_PL_START_FRAG + 1;
    *l4hdr_len = l4->iov_len;
    *src_len = pkt->virt_hdr.gso_size;

    return true;
}

static void net_tx_pkt_tcp_fragment_deinit(struct iovec *fragment)
{
    g_free(fragment[NET_TX_PKT_PL_START_FRAG].iov_base);
}

static void net_tx_pkt_tcp_fragment_fix(struct NetTxPkt *pkt,
                                        struct iovec *fragment,
                                        size_t fragment_len,
                                        uint8_t gso_type)
{
    struct iovec *l3hdr = fragment + NET_TX_PKT_L3HDR_FRAG;
    struct iovec *l4hdr = fragment + NET_TX_PKT_PL_START_FRAG;
    struct ip_header *ip = l3hdr->iov_base;
    struct ip6_header *ip6 = l3hdr->iov_base;
    size_t len = l3hdr->iov_len + l4hdr->iov_len + fragment_len;

    switch (gso_type) {
    case VIRTIO_NET_HDR_GSO_TCPV4:
        ip->ip_len = cpu_to_be16(len);
        eth_fix_ip4_checksum(l3hdr->iov_base, l3hdr->iov_len);
        break;

    case VIRTIO_NET_HDR_GSO_TCPV6:
        len -= sizeof(struct ip6_header);
        ip6->ip6_ctlun.ip6_un1.ip6_un1_plen = cpu_to_be16(len);
        break;
    }
}

static void net_tx_pkt_tcp_fragment_advance(struct NetTxPkt *pkt,
                                            struct iovec *fragment,
                                            size_t fragment_len,
                                            uint8_t gso_type)
{
    struct iovec *l3hdr = fragment + NET_TX_PKT_L3HDR_FRAG;
    struct iovec *l4hdr = fragment + NET_TX_PKT_PL_START_FRAG;
    struct ip_header *ip = l3hdr->iov_base;
    struct tcp_hdr *th = l4hdr->iov_base;

    if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4) {
        ip->ip_id = cpu_to_be16(be16_to_cpu(ip->ip_id) + 1);
    }

    th->th_seq = cpu_to_be32(be32_to_cpu(th->th_seq) + fragment_len);
    th->th_flags &= ~TH_CWR;
}

static void net_tx_pkt_udp_fragment_init(struct NetTxPkt *pkt,
                                         int *pl_idx,
                                         size_t *l4hdr_len,
                                         int *src_idx, size_t *src_offset,
                                         size_t *src_len)
{
    *pl_idx = NET_TX_PKT_PL_START_FRAG;
    *l4hdr_len = 0;
    *src_idx = NET_TX_PKT_PL_START_FRAG;
    *src_offset = 0;
    *src_len = IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size);
}

static void net_tx_pkt_udp_fragment_fix(struct NetTxPkt *pkt,
                                        struct iovec *fragment,
                                        size_t fragment_offset,
                                        size_t fragment_len)
{
    bool more_frags = fragment_offset + fragment_len < pkt->payload_len;
    uint16_t orig_flags;
    struct iovec *l3hdr = fragment + NET_TX_PKT_L3HDR_FRAG;
    struct ip_header *ip = l3hdr->iov_base;
    uint16_t frag_off_units = fragment_offset / IP_FRAG_UNIT_SIZE;
    uint16_t new_ip_off;

    assert(fragment_offset % IP_FRAG_UNIT_SIZE == 0);
    assert((frag_off_units & ~IP_OFFMASK) == 0);

    orig_flags = be16_to_cpu(ip->ip_off) & ~(IP_OFFMASK | IP_MF);
    new_ip_off = frag_off_units | orig_flags | (more_frags ? IP_MF : 0);
    ip->ip_off = cpu_to_be16(new_ip_off);
    ip->ip_len = cpu_to_be16(l3hdr->iov_len + fragment_len);

    eth_fix_ip4_checksum(l3hdr->iov_base, l3hdr->iov_len);
}
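
/*
 * Example (added commentary): the IPv4 fragment offset field counts
 * 8-byte units (IP_FRAG_UNIT_SIZE), so the second fragment of a datagram
 * split every 1480 payload bytes carries fragment_offset == 1480, encoded
 * as 1480 / 8 == 185 in ip_off, with IP_MF set on every fragment except
 * the last.
 */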

static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt,
                                           NetTxPktCallback callback,
                                           void *context)
{
    uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN;

    struct iovec fragment[NET_MAX_FRAG_SG_LIST];
    size_t fragment_len;
    size_t l4hdr_len;
    size_t src_len;

    int src_idx, dst_idx, pl_idx;
    size_t src_offset;
    size_t fragment_offset = 0;
    struct virtio_net_hdr virt_hdr = {
        .flags = pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM ?
                 VIRTIO_NET_HDR_F_DATA_VALID : 0
    };

    /* Copy headers */
    fragment[NET_TX_PKT_VHDR_FRAG].iov_base = &virt_hdr;
    fragment[NET_TX_PKT_VHDR_FRAG].iov_len = sizeof(virt_hdr);
    fragment[NET_TX_PKT_L2HDR_FRAG] = pkt->vec[NET_TX_PKT_L2HDR_FRAG];
    fragment[NET_TX_PKT_L3HDR_FRAG] = pkt->vec[NET_TX_PKT_L3HDR_FRAG];

    switch (gso_type) {
    case VIRTIO_NET_HDR_GSO_TCPV4:
    case VIRTIO_NET_HDR_GSO_TCPV6:
        if (!net_tx_pkt_tcp_fragment_init(pkt, fragment, &pl_idx, &l4hdr_len,
                                          &src_idx, &src_offset, &src_len)) {
            return false;
        }
        break;

    case VIRTIO_NET_HDR_GSO_UDP:
        net_tx_pkt_do_sw_csum(pkt, &pkt->vec[NET_TX_PKT_L2HDR_FRAG],
                              pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1,
                              pkt->payload_len);
        net_tx_pkt_udp_fragment_init(pkt, &pl_idx, &l4hdr_len,
                                     &src_idx, &src_offset, &src_len);
        break;

    default:
        abort();
    }

    /* Put as much data as possible and send */
    while (true) {
        dst_idx = pl_idx;
        fragment_len = net_tx_pkt_fetch_fragment(pkt,
            &src_idx, &src_offset, src_len, fragment, &dst_idx);
        if (!fragment_len) {
            break;
        }

        switch (gso_type) {
        case VIRTIO_NET_HDR_GSO_TCPV4:
        case VIRTIO_NET_HDR_GSO_TCPV6:
            net_tx_pkt_tcp_fragment_fix(pkt, fragment, fragment_len, gso_type);
            net_tx_pkt_do_sw_csum(pkt, fragment + NET_TX_PKT_L2HDR_FRAG,
                                  dst_idx - NET_TX_PKT_L2HDR_FRAG,
                                  l4hdr_len + fragment_len);
            break;

        case VIRTIO_NET_HDR_GSO_UDP:
            net_tx_pkt_udp_fragment_fix(pkt, fragment, fragment_offset,
                                        fragment_len);
            break;
        }

        callback(context,
                 fragment + NET_TX_PKT_L2HDR_FRAG,
                 dst_idx - NET_TX_PKT_L2HDR_FRAG,
                 fragment + NET_TX_PKT_VHDR_FRAG,
                 dst_idx - NET_TX_PKT_VHDR_FRAG);

        if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4 ||
            gso_type == VIRTIO_NET_HDR_GSO_TCPV6) {
            net_tx_pkt_tcp_fragment_advance(pkt, fragment, fragment_len,
                                            gso_type);
        }

        fragment_offset += fragment_len;
    }

    if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4 ||
        gso_type == VIRTIO_NET_HDR_GSO_TCPV6) {
        net_tx_pkt_tcp_fragment_deinit(fragment);
    }

    return true;
}
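
/*
 * Example (added commentary, illustrative numbers): a TCP packet with
 * 4000 payload bytes and gso_size == 1460 leaves here as three segments
 * carrying 1460, 1460 and 1080 bytes; between segments ip_id and th_seq
 * are advanced by net_tx_pkt_tcp_fragment_advance().
 */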

bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc)
{
    bool offload = qemu_get_using_vnet_hdr(nc->peer);
    return net_tx_pkt_send_custom(pkt, offload, net_tx_pkt_sendv, nc);
}

bool net_tx_pkt_send_custom(struct NetTxPkt *pkt, bool offload,
                            NetTxPktCallback callback, void *context)
{
    assert(pkt);

    /*
     * Since the underlying infrastructure does not support IP datagrams
     * longer than 64K, drop such packets without even trying to send them.
     */
    if (VIRTIO_NET_HDR_GSO_NONE != pkt->virt_hdr.gso_type) {
        if (pkt->payload_len >
            ETH_MAX_IP_DGRAM_LEN -
            pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len) {
            return false;
        }
    }

    if (offload || pkt->virt_hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE) {
        if (!offload && pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
            net_tx_pkt_do_sw_csum(pkt, &pkt->vec[NET_TX_PKT_L2HDR_FRAG],
                                  pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1,
                                  pkt->payload_len);
        }

        net_tx_pkt_fix_ip6_payload_len(pkt);
        callback(context, pkt->vec + NET_TX_PKT_L2HDR_FRAG,
                 pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - NET_TX_PKT_L2HDR_FRAG,
                 pkt->vec + NET_TX_PKT_VHDR_FRAG,
                 pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - NET_TX_PKT_VHDR_FRAG);
        return true;
    }

    return net_tx_pkt_do_sw_fragmentation(pkt, callback, context);
}
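
/*
 * Usage sketch (added commentary; 'my_capture_cb' is a hypothetical
 * callback, not part of this file): net_tx_pkt_send_custom() lets a
 * caller intercept each emitted frame, e.g. for loopback or tracing.
 * 'iov'/'iov_cnt' start at the L2 header, while 'virt_iov'/'virt_iov_cnt'
 * additionally include the leading virtio_net_hdr.
 *
 *     static void my_capture_cb(void *opaque,
 *                               const struct iovec *iov, int iov_cnt,
 *                               const struct iovec *virt_iov,
 *                               int virt_iov_cnt)
 *     {
 *         // inspect or forward the frame here
 *     }
 *
 *     net_tx_pkt_send_custom(pkt, false, my_capture_cb, opaque);
 */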

void net_tx_pkt_fix_ip6_payload_len(struct NetTxPkt *pkt)
{
    struct iovec *l2 = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
    if (eth_get_l3_proto(l2, 1, l2->iov_len) == ETH_P_IPV6) {
        struct ip6_header *ip6 = (struct ip6_header *) pkt->l3_hdr;
        /*
         * TODO: if QEMU ever supports >64K packets, add a jumbo option
         * check here, something like:
         * 'if (ip6->ip6_plen == 0 && !has_jumbo_option(ip6)) {'
         */
        if (ip6->ip6_plen == 0) {
            if (pkt->payload_len <= ETH_MAX_IP_DGRAM_LEN) {
                ip6->ip6_plen = htons(pkt->payload_len);
            }
            /*
             * TODO: if QEMU ever supports >64K packets, add the jumbo
             * payload option for packets greater than 65,535 bytes.
             */
        }
    }
}