fbe78f4f
AL
1/*
2 * Virtio Network Device
3 *
4 * Copyright IBM, Corp. 2007
5 *
6 * Authors:
7 * Anthony Liguori <aliguori@us.ibm.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 */
13
9b8bfe21 14#include "qemu/osdep.h"
1de7afc9 15#include "qemu/iov.h"
0d09e41a 16#include "hw/virtio/virtio.h"
1422e32d 17#include "net/net.h"
7200ac3c 18#include "net/checksum.h"
a8ed73f7 19#include "net/tap.h"
1de7afc9
PB
20#include "qemu/error-report.h"
21#include "qemu/timer.h"
0d09e41a
PB
22#include "hw/virtio/virtio-net.h"
23#include "net/vhost_net.h"
9d8c6a25 24#include "net/announce.h"
17ec5a86 25#include "hw/virtio/virtio-bus.h"
e688df6b 26#include "qapi/error.h"
9af23989 27#include "qapi/qapi-events-net.h"
1399c60d 28#include "hw/virtio/virtio-access.h"
f8d806c9 29#include "migration/misc.h"
9473939e 30#include "standard-headers/linux/ethtool.h"
9d8c6a25 31#include "trace.h"
fbe78f4f 32
0ce0e8f4 33#define VIRTIO_NET_VM_VERSION 11
b6503ed9 34
4ffb17f5 35#define MAC_TABLE_ENTRIES 64
f21c0ed9 36#define MAX_VLAN (1 << 12) /* Per 802.1Q definition */
9d6271b8 37
1c0fbfa3
MT
38/* previously fixed value */
39#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
9b02e161
WW
40#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
41
1c0fbfa3
MT
42/* for now, only allow larger queues; with virtio-1, guest can downsize */
43#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
9b02e161 44#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
1c0fbfa3 45
2974e916
YB
46#define VIRTIO_NET_IP4_ADDR_SIZE 8 /* ipv4 saddr + daddr */
47
48#define VIRTIO_NET_TCP_FLAG 0x3F
49#define VIRTIO_NET_TCP_HDR_LENGTH 0xF000
50
51/* IPv4 max payload, 16 bits in the header */
52#define VIRTIO_NET_MAX_IP4_PAYLOAD (65535 - sizeof(struct ip_header))
53#define VIRTIO_NET_MAX_TCP_PAYLOAD 65535
54
55/* header length value in the ip header, without options */
56#define VIRTIO_NET_IP4_HEADER_LENGTH 5
57
58#define VIRTIO_NET_IP6_ADDR_SIZE 32 /* ipv6 saddr + daddr */
59#define VIRTIO_NET_MAX_IP6_PAYLOAD VIRTIO_NET_MAX_TCP_PAYLOAD
60
61/* Purge coalesced packets timer interval. This value affects performance
62 a lot and should be tuned carefully; '300000' (300us) is the recommended
63 value to pass the WHQL test, while '50000' can gain 2x netperf throughput with
64 tso/gso/gro 'off'. */
65#define VIRTIO_NET_RSC_DEFAULT_INTERVAL 300000
66
67/* temporary until the standard header includes it */
68#if !defined(VIRTIO_NET_HDR_F_RSC_INFO)
69
70#define VIRTIO_NET_HDR_F_RSC_INFO 4 /* rsc_ext data in csum_ fields */
d47e5e31 71#define VIRTIO_NET_F_RSC_EXT 61
2974e916
YB
72
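/* With the RSC extension negotiated, the otherwise unused csum_start and
 * csum_offset fields of the vnet header carry the number of coalesced
 * packets and of duplicated acks for a receive buffer. */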
73static inline __virtio16 *virtio_net_rsc_ext_num_packets(
74 struct virtio_net_hdr *hdr)
75{
76 return &hdr->csum_start;
77}
78
79static inline __virtio16 *virtio_net_rsc_ext_num_dupacks(
80 struct virtio_net_hdr *hdr)
81{
82 return &hdr->csum_offset;
83}
84
85#endif
86
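/* End offset of each optional config field, keyed by the feature bit that
 * exposes it; the device config size is derived from the negotiated features
 * so that virtio_net_get_config()/virtio_net_set_config() only copy the
 * fields that are actually present. */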
14f9b664 87static VirtIOFeature feature_sizes[] = {
127833ee 88 {.flags = 1ULL << VIRTIO_NET_F_MAC,
ba550851 89 .end = virtio_endof(struct virtio_net_config, mac)},
127833ee 90 {.flags = 1ULL << VIRTIO_NET_F_STATUS,
ba550851 91 .end = virtio_endof(struct virtio_net_config, status)},
127833ee 92 {.flags = 1ULL << VIRTIO_NET_F_MQ,
ba550851 93 .end = virtio_endof(struct virtio_net_config, max_virtqueue_pairs)},
127833ee 94 {.flags = 1ULL << VIRTIO_NET_F_MTU,
ba550851 95 .end = virtio_endof(struct virtio_net_config, mtu)},
9473939e 96 {.flags = 1ULL << VIRTIO_NET_F_SPEED_DUPLEX,
ba550851 97 .end = virtio_endof(struct virtio_net_config, duplex)},
14f9b664
JL
98 {}
99};
100
fed699f9 101static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
0c87e93e
JW
102{
103 VirtIONet *n = qemu_get_nic_opaque(nc);
104
fed699f9 105 return &n->vqs[nc->queue_index];
0c87e93e 106}
fed699f9
JW
107
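/* Virtqueues are allocated in RX/TX pairs (even index = RX, odd = TX), so the
 * owning NIC queue of a virtqueue is simply its index divided by two. */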
108static int vq2q(int queue_index)
109{
110 return queue_index / 2;
111}
112
fbe78f4f
AL
113/* TODO
114 * - we could suppress RX interrupt if we were so inclined.
115 */
116
0f03eca6 117static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
fbe78f4f 118{
17a0ca55 119 VirtIONet *n = VIRTIO_NET(vdev);
fbe78f4f
AL
120 struct virtio_net_config netcfg;
121
1399c60d
RR
122 virtio_stw_p(vdev, &netcfg.status, n->status);
123 virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
a93e599d 124 virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
79674068 125 memcpy(netcfg.mac, n->mac, ETH_ALEN);
9473939e
JB
126 virtio_stl_p(vdev, &netcfg.speed, n->net_conf.speed);
127 netcfg.duplex = n->net_conf.duplex;
14f9b664 128 memcpy(config, &netcfg, n->config_size);
fbe78f4f
AL
129}
130
0f03eca6
AL
131static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
132{
17a0ca55 133 VirtIONet *n = VIRTIO_NET(vdev);
14f9b664 134 struct virtio_net_config netcfg = {};
0f03eca6 135
14f9b664 136 memcpy(&netcfg, config, n->config_size);
0f03eca6 137
95129d6f
CH
138 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
139 !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
c1943a3f 140 memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
79674068 141 memcpy(n->mac, netcfg.mac, ETH_ALEN);
b356f76d 142 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
0f03eca6
AL
143 }
144}
145
783e7706
MT
146static bool virtio_net_started(VirtIONet *n, uint8_t status)
147{
17a0ca55 148 VirtIODevice *vdev = VIRTIO_DEVICE(n);
783e7706 149 return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
17a0ca55 150 (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
783e7706
MT
151}
152
b2c929f0
DDAG
153static void virtio_net_announce_notify(VirtIONet *net)
154{
155 VirtIODevice *vdev = VIRTIO_DEVICE(net);
156 trace_virtio_net_announce_notify();
157
158 net->status |= VIRTIO_NET_S_ANNOUNCE;
159 virtio_notify_config(vdev);
160}
161
f57fcf70
JW
162static void virtio_net_announce_timer(void *opaque)
163{
164 VirtIONet *n = opaque;
9d8c6a25 165 trace_virtio_net_announce_timer(n->announce_timer.round);
f57fcf70 166
9d8c6a25 167 n->announce_timer.round--;
b2c929f0
DDAG
168 virtio_net_announce_notify(n);
169}
170
171static void virtio_net_announce(NetClientState *nc)
172{
173 VirtIONet *n = qemu_get_nic_opaque(nc);
174 VirtIODevice *vdev = VIRTIO_DEVICE(n);
175
176 /*
177 * Make sure the virtio migration announcement timer isn't running.
178 * If it is, let it trigger the announcement so that we do not cause
179 * confusion.
180 */
181 if (n->announce_timer.round) {
182 return;
183 }
184
185 if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
186 virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
187 virtio_net_announce_notify(n);
188 }
f57fcf70
JW
189}
190
783e7706 191static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
afbaa7b4 192{
17a0ca55 193 VirtIODevice *vdev = VIRTIO_DEVICE(n);
b356f76d 194 NetClientState *nc = qemu_get_queue(n->nic);
fed699f9 195 int queues = n->multiqueue ? n->max_queues : 1;
b356f76d 196
ed8b4afe 197 if (!get_vhost_net(nc->peer)) {
afbaa7b4
MT
198 return;
199 }
fed699f9 200
8c1ac475
RK
201 if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
202 !!n->vhost_started) {
afbaa7b4
MT
203 return;
204 }
205 if (!n->vhost_started) {
086abc1c
MT
206 int r, i;
207
1bfa316c
GK
208 if (n->needs_vnet_hdr_swap) {
209 error_report("backend does not support %s vnet headers; "
210 "falling back on userspace virtio",
211 virtio_is_big_endian(vdev) ? "BE" : "LE");
212 return;
213 }
214
086abc1c
MT
215 /* Any packets outstanding? Purge them to avoid touching rings
216 * when vhost is running.
217 */
218 for (i = 0; i < queues; i++) {
219 NetClientState *qnc = qemu_get_subqueue(n->nic, i);
220
221 /* Purge both directions: TX and RX. */
222 qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
223 qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
224 }
225
a93e599d
MC
226 if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
227 r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
228 if (r < 0) {
229 error_report("%uBytes MTU not supported by the backend",
230 n->net_conf.mtu);
231
232 return;
233 }
234 }
235
1830b80f 236 n->vhost_started = 1;
17a0ca55 237 r = vhost_net_start(vdev, n->nic->ncs, queues);
afbaa7b4 238 if (r < 0) {
e7b43f7e
SH
239 error_report("unable to start vhost net: %d: "
240 "falling back on userspace virtio", -r);
1830b80f 241 n->vhost_started = 0;
afbaa7b4
MT
242 }
243 } else {
17a0ca55 244 vhost_net_stop(vdev, n->nic->ncs, queues);
afbaa7b4
MT
245 n->vhost_started = 0;
246 }
247}
248
1bfa316c
GK
249static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
250 NetClientState *peer,
251 bool enable)
252{
253 if (virtio_is_big_endian(vdev)) {
254 return qemu_set_vnet_be(peer, enable);
255 } else {
256 return qemu_set_vnet_le(peer, enable);
257 }
258}
259
260static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
261 int queues, bool enable)
262{
263 int i;
264
265 for (i = 0; i < queues; i++) {
266 if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
267 enable) {
268 while (--i >= 0) {
269 virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
270 }
271
272 return true;
273 }
274 }
275
276 return false;
277}
278
279static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
280{
281 VirtIODevice *vdev = VIRTIO_DEVICE(n);
282 int queues = n->multiqueue ? n->max_queues : 1;
283
284 if (virtio_net_started(n, status)) {
285 /* Before using the device, we tell the network backend about the
286 * endianness to use when parsing vnet headers. If the backend
287 * can't do it, we fall back to fixing the headers in the core
288 * virtio-net code.
289 */
290 n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
291 queues, true);
292 } else if (virtio_net_started(n, vdev->status)) {
293 /* After using the device, we need to reset the network backend to
294 * the default (guest native endianness), otherwise the guest may
295 * lose network connectivity if it is rebooted into a different
296 * endianness.
297 */
298 virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false);
299 }
300}
301
283e2c2a
YB
302static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
303{
304 unsigned int dropped = virtqueue_drop_all(vq);
305 if (dropped) {
306 virtio_notify(vdev, vq);
307 }
308}
309
783e7706
MT
310static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
311{
17a0ca55 312 VirtIONet *n = VIRTIO_NET(vdev);
fed699f9
JW
313 VirtIONetQueue *q;
314 int i;
315 uint8_t queue_status;
783e7706 316
1bfa316c 317 virtio_net_vnet_endian_status(n, status);
783e7706
MT
318 virtio_net_vhost_status(n, status);
319
fed699f9 320 for (i = 0; i < n->max_queues; i++) {
38705bb5
FZ
321 NetClientState *ncs = qemu_get_subqueue(n->nic, i);
322 bool queue_started;
fed699f9 323 q = &n->vqs[i];
783e7706 324
fed699f9
JW
325 if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
326 queue_status = 0;
783e7706 327 } else {
fed699f9 328 queue_status = status;
783e7706 329 }
38705bb5
FZ
330 queue_started =
331 virtio_net_started(n, queue_status) && !n->vhost_started;
332
333 if (queue_started) {
334 qemu_flush_queued_packets(ncs);
335 }
fed699f9
JW
336
337 if (!q->tx_waiting) {
338 continue;
339 }
340
38705bb5 341 if (queue_started) {
fed699f9 342 if (q->tx_timer) {
bc72ad67
AB
343 timer_mod(q->tx_timer,
344 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
fed699f9
JW
345 } else {
346 qemu_bh_schedule(q->tx_bh);
347 }
783e7706 348 } else {
fed699f9 349 if (q->tx_timer) {
bc72ad67 350 timer_del(q->tx_timer);
fed699f9
JW
351 } else {
352 qemu_bh_cancel(q->tx_bh);
353 }
283e2c2a 354 if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
70e53e6e
JW
355 (queue_status & VIRTIO_CONFIG_S_DRIVER_OK) &&
356 vdev->vm_running) {
283e2c2a
YB
357 /* if tx is waiting we likely have some packets in the tx queue
358 * and have disabled notification */
359 q->tx_waiting = 0;
360 virtio_queue_set_notification(q->tx_vq, 1);
361 virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
362 }
783e7706
MT
363 }
364 }
365}
366
4e68f7a0 367static void virtio_net_set_link_status(NetClientState *nc)
554c97dd 368{
cc1f0f45 369 VirtIONet *n = qemu_get_nic_opaque(nc);
17a0ca55 370 VirtIODevice *vdev = VIRTIO_DEVICE(n);
554c97dd
AL
371 uint16_t old_status = n->status;
372
eb6b6c12 373 if (nc->link_down)
554c97dd
AL
374 n->status &= ~VIRTIO_NET_S_LINK_UP;
375 else
376 n->status |= VIRTIO_NET_S_LINK_UP;
377
378 if (n->status != old_status)
17a0ca55 379 virtio_notify_config(vdev);
afbaa7b4 380
17a0ca55 381 virtio_net_set_status(vdev, vdev->status);
554c97dd
AL
382}
383
b1be4280
AK
384static void rxfilter_notify(NetClientState *nc)
385{
b1be4280
AK
386 VirtIONet *n = qemu_get_nic_opaque(nc);
387
388 if (nc->rxfilter_notify_enabled) {
96e35046 389 gchar *path = object_get_canonical_path(OBJECT(n->qdev));
06150279 390 qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
3ab72385 391 n->netclient_name, path);
96e35046 392 g_free(path);
b1be4280
AK
393
394 /* disable event notification to avoid events flooding */
395 nc->rxfilter_notify_enabled = 0;
396 }
397}
398
f7bc8ef8
AK
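/* Expand the VLAN filter bitmap (MAX_VLAN bits held in 32-bit words) into a
 * list of configured VLAN ids for the rx-filter query. */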
399static intList *get_vlan_table(VirtIONet *n)
400{
401 intList *list, *entry;
402 int i, j;
403
404 list = NULL;
405 for (i = 0; i < MAX_VLAN >> 5; i++) {
406 for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
407 if (n->vlans[i] & (1U << j)) {
408 entry = g_malloc0(sizeof(*entry));
409 entry->value = (i << 5) + j;
410 entry->next = list;
411 list = entry;
412 }
413 }
414 }
415
416 return list;
417}
418
b1be4280
AK
419static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
420{
421 VirtIONet *n = qemu_get_nic_opaque(nc);
f7bc8ef8 422 VirtIODevice *vdev = VIRTIO_DEVICE(n);
b1be4280
AK
423 RxFilterInfo *info;
424 strList *str_list, *entry;
f7bc8ef8 425 int i;
b1be4280
AK
426
427 info = g_malloc0(sizeof(*info));
428 info->name = g_strdup(nc->name);
429 info->promiscuous = n->promisc;
430
431 if (n->nouni) {
432 info->unicast = RX_STATE_NONE;
433 } else if (n->alluni) {
434 info->unicast = RX_STATE_ALL;
435 } else {
436 info->unicast = RX_STATE_NORMAL;
437 }
438
439 if (n->nomulti) {
440 info->multicast = RX_STATE_NONE;
441 } else if (n->allmulti) {
442 info->multicast = RX_STATE_ALL;
443 } else {
444 info->multicast = RX_STATE_NORMAL;
445 }
446
447 info->broadcast_allowed = n->nobcast;
448 info->multicast_overflow = n->mac_table.multi_overflow;
449 info->unicast_overflow = n->mac_table.uni_overflow;
450
b0575ba4 451 info->main_mac = qemu_mac_strdup_printf(n->mac);
b1be4280
AK
452
453 str_list = NULL;
454 for (i = 0; i < n->mac_table.first_multi; i++) {
455 entry = g_malloc0(sizeof(*entry));
b0575ba4 456 entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
b1be4280
AK
457 entry->next = str_list;
458 str_list = entry;
459 }
460 info->unicast_table = str_list;
461
462 str_list = NULL;
463 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
464 entry = g_malloc0(sizeof(*entry));
b0575ba4 465 entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
b1be4280
AK
466 entry->next = str_list;
467 str_list = entry;
468 }
469 info->multicast_table = str_list;
f7bc8ef8 470 info->vlan_table = get_vlan_table(n);
b1be4280 471
95129d6f 472 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
f7bc8ef8
AK
473 info->vlan = RX_STATE_ALL;
474 } else if (!info->vlan_table) {
475 info->vlan = RX_STATE_NONE;
476 } else {
477 info->vlan = RX_STATE_NORMAL;
b1be4280 478 }
b1be4280
AK
479
480 /* enable event notification after query */
481 nc->rxfilter_notify_enabled = 1;
482
483 return info;
484}
485
002437cd
AL
486static void virtio_net_reset(VirtIODevice *vdev)
487{
17a0ca55 488 VirtIONet *n = VIRTIO_NET(vdev);
94b52958 489 int i;
002437cd
AL
490
491 /* Reset back to compatibility mode */
492 n->promisc = 1;
493 n->allmulti = 0;
015cb166
AW
494 n->alluni = 0;
495 n->nomulti = 0;
496 n->nouni = 0;
497 n->nobcast = 0;
fed699f9
JW
498 /* multiqueue is disabled by default */
499 n->curr_queues = 1;
9d8c6a25
DDAG
500 timer_del(n->announce_timer.tm);
501 n->announce_timer.round = 0;
f57fcf70 502 n->status &= ~VIRTIO_NET_S_ANNOUNCE;
b6503ed9 503
f21c0ed9 504 /* Flush any MAC and VLAN filter table state */
b6503ed9 505 n->mac_table.in_use = 0;
2d9aba39 506 n->mac_table.first_multi = 0;
8fd2a2f1
AW
507 n->mac_table.multi_overflow = 0;
508 n->mac_table.uni_overflow = 0;
b6503ed9 509 memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
41dc8a67 510 memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
702d66a8 511 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
f21c0ed9 512 memset(n->vlans, 0, MAX_VLAN >> 3);
94b52958
GK
513
514 /* Flush any async TX */
515 for (i = 0; i < n->max_queues; i++) {
516 NetClientState *nc = qemu_get_subqueue(n->nic, i);
517
518 if (nc->peer) {
519 qemu_flush_or_purge_queued_packets(nc->peer, true);
520 assert(!virtio_net_get_subqueue(nc)->async_tx.elem);
521 }
522 }
002437cd
AL
523}
524
6e371ab8 525static void peer_test_vnet_hdr(VirtIONet *n)
3a330134 526{
b356f76d
JW
527 NetClientState *nc = qemu_get_queue(n->nic);
528 if (!nc->peer) {
6e371ab8 529 return;
b356f76d 530 }
3a330134 531
d6085e3a 532 n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
6e371ab8 533}
3a330134 534
6e371ab8
MT
535static int peer_has_vnet_hdr(VirtIONet *n)
536{
3a330134
MM
537 return n->has_vnet_hdr;
538}
539
0ce0e8f4
MM
540static int peer_has_ufo(VirtIONet *n)
541{
542 if (!peer_has_vnet_hdr(n))
543 return 0;
544
d6085e3a 545 n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);
0ce0e8f4
MM
546
547 return n->has_ufo;
548}
549
bb9d17f8
CH
550static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
551 int version_1)
ff3a8066 552{
fed699f9
JW
553 int i;
554 NetClientState *nc;
555
ff3a8066
MT
556 n->mergeable_rx_bufs = mergeable_rx_bufs;
557
bb9d17f8
CH
558 if (version_1) {
559 n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
560 } else {
561 n->guest_hdr_len = n->mergeable_rx_bufs ?
562 sizeof(struct virtio_net_hdr_mrg_rxbuf) :
563 sizeof(struct virtio_net_hdr);
564 }
ff3a8066 565
fed699f9
JW
566 for (i = 0; i < n->max_queues; i++) {
567 nc = qemu_get_subqueue(n->nic, i);
568
569 if (peer_has_vnet_hdr(n) &&
d6085e3a
SH
570 qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
571 qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
fed699f9
JW
572 n->host_hdr_len = n->guest_hdr_len;
573 }
ff3a8066
MT
574 }
575}
576
2eef278b
MT
577static int virtio_net_max_tx_queue_size(VirtIONet *n)
578{
579 NetClientState *peer = n->nic_conf.peers.ncs[0];
580
581 /*
582 * Backends other than vhost-user don't support max queue size.
583 */
584 if (!peer) {
585 return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
586 }
587
588 if (peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
589 return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
590 }
591
592 return VIRTQUEUE_MAX_SIZE;
593}
594
fed699f9
JW
595static int peer_attach(VirtIONet *n, int index)
596{
597 NetClientState *nc = qemu_get_subqueue(n->nic, index);
598
599 if (!nc->peer) {
600 return 0;
601 }
602
f394b2e2 603 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
7263a0ad
CO
604 vhost_set_vring_enable(nc->peer, 1);
605 }
606
f394b2e2 607 if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
fed699f9
JW
608 return 0;
609 }
610
1074b879
JW
611 if (n->max_queues == 1) {
612 return 0;
613 }
614
fed699f9
JW
615 return tap_enable(nc->peer);
616}
617
618static int peer_detach(VirtIONet *n, int index)
619{
620 NetClientState *nc = qemu_get_subqueue(n->nic, index);
621
622 if (!nc->peer) {
623 return 0;
624 }
625
f394b2e2 626 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
7263a0ad
CO
627 vhost_set_vring_enable(nc->peer, 0);
628 }
629
f394b2e2 630 if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
fed699f9
JW
631 return 0;
632 }
633
634 return tap_disable(nc->peer);
635}
636
637static void virtio_net_set_queues(VirtIONet *n)
638{
639 int i;
ddfa83ea 640 int r;
fed699f9 641
68b5f314
YB
642 if (n->nic->peer_deleted) {
643 return;
644 }
645
fed699f9
JW
646 for (i = 0; i < n->max_queues; i++) {
647 if (i < n->curr_queues) {
ddfa83ea
JS
648 r = peer_attach(n, i);
649 assert(!r);
fed699f9 650 } else {
ddfa83ea
JS
651 r = peer_detach(n, i);
652 assert(!r);
fed699f9
JW
653 }
654 }
655}
656
ec57db16 657static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
fed699f9 658
9d5b731d
JW
659static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
660 Error **errp)
fbe78f4f 661{
17a0ca55 662 VirtIONet *n = VIRTIO_NET(vdev);
b356f76d 663 NetClientState *nc = qemu_get_queue(n->nic);
fbe78f4f 664
da3e8a23
SZ
665 /* First, sync all the features virtio-net could possibly support */
666 features |= n->host_features;
667
0cd09c3a 668 virtio_add_feature(&features, VIRTIO_NET_F_MAC);
c9f79a3f 669
6e371ab8 670 if (!peer_has_vnet_hdr(n)) {
0cd09c3a
CH
671 virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
672 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
673 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
674 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);
8172539d 675
0cd09c3a
CH
676 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
677 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
678 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
679 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
8172539d 680 }
3a330134 681
8172539d 682 if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
0cd09c3a
CH
683 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
684 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
3a330134
MM
685 }
686
ed8b4afe 687 if (!get_vhost_net(nc->peer)) {
9bc6304c
MT
688 return features;
689 }
2974e916 690
75ebec11
MC
691 features = vhost_net_get_features(get_vhost_net(nc->peer), features);
692 vdev->backend_features = features;
693
694 if (n->mtu_bypass_backend &&
695 (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
696 features |= (1ULL << VIRTIO_NET_F_MTU);
697 }
698
699 return features;
fbe78f4f
AL
700}
701
019a3edb 702static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
8eca6b1b 703{
019a3edb 704 uint64_t features = 0;
8eca6b1b
AL
705
706 /* Linux kernel 2.6.25. It understood MAC (as everyone must),
707 * but also these: */
0cd09c3a
CH
708 virtio_add_feature(&features, VIRTIO_NET_F_MAC);
709 virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
710 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
711 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
712 virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);
8eca6b1b 713
8172539d 714 return features;
8eca6b1b
AL
715}
716
644c9858
DF
717static void virtio_net_apply_guest_offloads(VirtIONet *n)
718{
ad37bb3b 719 qemu_set_offload(qemu_get_queue(n->nic)->peer,
644c9858
DF
720 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
721 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
722 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
723 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
724 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
725}
726
727static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
728{
729 static const uint64_t guest_offloads_mask =
730 (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
731 (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
732 (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
733 (1ULL << VIRTIO_NET_F_GUEST_ECN) |
734 (1ULL << VIRTIO_NET_F_GUEST_UFO);
735
736 return guest_offloads_mask & features;
737}
738
739static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
740{
741 VirtIODevice *vdev = VIRTIO_DEVICE(n);
742 return virtio_net_guest_offloads_by_features(vdev->guest_features);
743}
744
d5aaa1b0 745static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
fbe78f4f 746{
17a0ca55 747 VirtIONet *n = VIRTIO_NET(vdev);
fed699f9
JW
748 int i;
749
75ebec11
MC
750 if (n->mtu_bypass_backend &&
751 !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
752 features &= ~(1ULL << VIRTIO_NET_F_MTU);
753 }
754
ef546f12 755 virtio_net_set_multiqueue(n,
95129d6f 756 virtio_has_feature(features, VIRTIO_NET_F_MQ));
fbe78f4f 757
ef546f12 758 virtio_net_set_mrg_rx_bufs(n,
95129d6f
CH
759 virtio_has_feature(features,
760 VIRTIO_NET_F_MRG_RXBUF),
761 virtio_has_feature(features,
762 VIRTIO_F_VERSION_1));
f5436dd9 763
2974e916
YB
764 n->rsc4_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
765 virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO4);
766 n->rsc6_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
767 virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO6);
768
f5436dd9 769 if (n->has_vnet_hdr) {
644c9858
DF
770 n->curr_guest_offloads =
771 virtio_net_guest_offloads_by_features(features);
772 virtio_net_apply_guest_offloads(n);
f5436dd9 773 }
fed699f9
JW
774
775 for (i = 0; i < n->max_queues; i++) {
776 NetClientState *nc = qemu_get_subqueue(n->nic, i);
777
ed8b4afe 778 if (!get_vhost_net(nc->peer)) {
fed699f9
JW
779 continue;
780 }
ed8b4afe 781 vhost_net_ack_features(get_vhost_net(nc->peer), features);
dc14a397 782 }
0b1eaa88 783
95129d6f 784 if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
0b1eaa88
SF
785 memset(n->vlans, 0, MAX_VLAN >> 3);
786 } else {
787 memset(n->vlans, 0xff, MAX_VLAN >> 3);
788 }
fbe78f4f
AL
789}
790
002437cd 791static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
921ac5d0 792 struct iovec *iov, unsigned int iov_cnt)
002437cd
AL
793{
794 uint8_t on;
921ac5d0 795 size_t s;
b1be4280 796 NetClientState *nc = qemu_get_queue(n->nic);
002437cd 797
921ac5d0
MT
798 s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
799 if (s != sizeof(on)) {
800 return VIRTIO_NET_ERR;
002437cd
AL
801 }
802
dd23454b 803 if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
002437cd 804 n->promisc = on;
dd23454b 805 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
002437cd 806 n->allmulti = on;
dd23454b 807 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
015cb166 808 n->alluni = on;
dd23454b 809 } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
015cb166 810 n->nomulti = on;
dd23454b 811 } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
015cb166 812 n->nouni = on;
dd23454b 813 } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
015cb166 814 n->nobcast = on;
921ac5d0 815 } else {
002437cd 816 return VIRTIO_NET_ERR;
921ac5d0 817 }
002437cd 818
b1be4280
AK
819 rxfilter_notify(nc);
820
002437cd
AL
821 return VIRTIO_NET_OK;
822}
823
644c9858
DF
824static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
825 struct iovec *iov, unsigned int iov_cnt)
826{
827 VirtIODevice *vdev = VIRTIO_DEVICE(n);
828 uint64_t offloads;
829 size_t s;
830
95129d6f 831 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
644c9858
DF
832 return VIRTIO_NET_ERR;
833 }
834
835 s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
836 if (s != sizeof(offloads)) {
837 return VIRTIO_NET_ERR;
838 }
839
840 if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
841 uint64_t supported_offloads;
842
189ae6bb
JW
843 offloads = virtio_ldq_p(vdev, &offloads);
844
644c9858
DF
845 if (!n->has_vnet_hdr) {
846 return VIRTIO_NET_ERR;
847 }
848
2974e916
YB
849 n->rsc4_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
850 virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO4);
851 n->rsc6_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
852 virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO6);
853 virtio_clear_feature(&offloads, VIRTIO_NET_F_RSC_EXT);
854
644c9858
DF
855 supported_offloads = virtio_net_supported_guest_offloads(n);
856 if (offloads & ~supported_offloads) {
857 return VIRTIO_NET_ERR;
858 }
859
860 n->curr_guest_offloads = offloads;
861 virtio_net_apply_guest_offloads(n);
862
863 return VIRTIO_NET_OK;
864 } else {
865 return VIRTIO_NET_ERR;
866 }
867}
868
b6503ed9 869static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
921ac5d0 870 struct iovec *iov, unsigned int iov_cnt)
b6503ed9 871{
1399c60d 872 VirtIODevice *vdev = VIRTIO_DEVICE(n);
b6503ed9 873 struct virtio_net_ctrl_mac mac_data;
921ac5d0 874 size_t s;
b1be4280 875 NetClientState *nc = qemu_get_queue(n->nic);
b6503ed9 876
c1943a3f
AK
877 if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
878 if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
879 return VIRTIO_NET_ERR;
880 }
881 s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
882 assert(s == sizeof(n->mac));
b356f76d 883 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
b1be4280
AK
884 rxfilter_notify(nc);
885
c1943a3f
AK
886 return VIRTIO_NET_OK;
887 }
888
921ac5d0 889 if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
b6503ed9 890 return VIRTIO_NET_ERR;
921ac5d0 891 }
b6503ed9 892
cae2e556
AK
893 int in_use = 0;
894 int first_multi = 0;
895 uint8_t uni_overflow = 0;
896 uint8_t multi_overflow = 0;
897 uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
b6503ed9 898
921ac5d0
MT
899 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
900 sizeof(mac_data.entries));
1399c60d 901 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
921ac5d0 902 if (s != sizeof(mac_data.entries)) {
b1be4280 903 goto error;
921ac5d0
MT
904 }
905 iov_discard_front(&iov, &iov_cnt, s);
b6503ed9 906
921ac5d0 907 if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
b1be4280 908 goto error;
921ac5d0 909 }
b6503ed9
AL
910
911 if (mac_data.entries <= MAC_TABLE_ENTRIES) {
cae2e556 912 s = iov_to_buf(iov, iov_cnt, 0, macs,
921ac5d0
MT
913 mac_data.entries * ETH_ALEN);
914 if (s != mac_data.entries * ETH_ALEN) {
b1be4280 915 goto error;
921ac5d0 916 }
cae2e556 917 in_use += mac_data.entries;
b6503ed9 918 } else {
cae2e556 919 uni_overflow = 1;
b6503ed9
AL
920 }
921
921ac5d0
MT
922 iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);
923
cae2e556 924 first_multi = in_use;
2d9aba39 925
921ac5d0
MT
926 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
927 sizeof(mac_data.entries));
1399c60d 928 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
921ac5d0 929 if (s != sizeof(mac_data.entries)) {
b1be4280 930 goto error;
921ac5d0
MT
931 }
932
933 iov_discard_front(&iov, &iov_cnt, s);
b6503ed9 934
921ac5d0 935 if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
b1be4280 936 goto error;
921ac5d0 937 }
b6503ed9 938
edc24385 939 if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
cae2e556 940 s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
921ac5d0
MT
941 mac_data.entries * ETH_ALEN);
942 if (s != mac_data.entries * ETH_ALEN) {
b1be4280 943 goto error;
8fd2a2f1 944 }
cae2e556 945 in_use += mac_data.entries;
921ac5d0 946 } else {
cae2e556 947 multi_overflow = 1;
b6503ed9
AL
948 }
949
cae2e556
AK
950 n->mac_table.in_use = in_use;
951 n->mac_table.first_multi = first_multi;
952 n->mac_table.uni_overflow = uni_overflow;
953 n->mac_table.multi_overflow = multi_overflow;
954 memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
955 g_free(macs);
b1be4280
AK
956 rxfilter_notify(nc);
957
b6503ed9 958 return VIRTIO_NET_OK;
b1be4280
AK
959
960error:
cae2e556 961 g_free(macs);
b1be4280 962 return VIRTIO_NET_ERR;
b6503ed9
AL
963}
964
f21c0ed9 965static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
921ac5d0 966 struct iovec *iov, unsigned int iov_cnt)
f21c0ed9 967{
1399c60d 968 VirtIODevice *vdev = VIRTIO_DEVICE(n);
f21c0ed9 969 uint16_t vid;
921ac5d0 970 size_t s;
b1be4280 971 NetClientState *nc = qemu_get_queue(n->nic);
f21c0ed9 972
921ac5d0 973 s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
1399c60d 974 vid = virtio_lduw_p(vdev, &vid);
921ac5d0 975 if (s != sizeof(vid)) {
f21c0ed9
AL
976 return VIRTIO_NET_ERR;
977 }
978
f21c0ed9
AL
979 if (vid >= MAX_VLAN)
980 return VIRTIO_NET_ERR;
981
982 if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
983 n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
984 else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
985 n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
986 else
987 return VIRTIO_NET_ERR;
988
b1be4280
AK
989 rxfilter_notify(nc);
990
f21c0ed9
AL
991 return VIRTIO_NET_OK;
992}
993
f57fcf70
JW
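/* The guest acknowledges a GUEST_ANNOUNCE notification here: clear the
 * announce status bit and step the announce timer if more rounds remain. */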
994static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
995 struct iovec *iov, unsigned int iov_cnt)
996{
9d8c6a25 997 trace_virtio_net_handle_announce(n->announce_timer.round);
f57fcf70
JW
998 if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
999 n->status & VIRTIO_NET_S_ANNOUNCE) {
1000 n->status &= ~VIRTIO_NET_S_ANNOUNCE;
9d8c6a25
DDAG
1001 if (n->announce_timer.round) {
1002 qemu_announce_timer_step(&n->announce_timer);
f57fcf70
JW
1003 }
1004 return VIRTIO_NET_OK;
1005 } else {
1006 return VIRTIO_NET_ERR;
1007 }
1008}
1009
fed699f9 1010static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
f8f7c533 1011 struct iovec *iov, unsigned int iov_cnt)
fed699f9 1012{
17a0ca55 1013 VirtIODevice *vdev = VIRTIO_DEVICE(n);
f8f7c533
JW
1014 struct virtio_net_ctrl_mq mq;
1015 size_t s;
1016 uint16_t queues;
fed699f9 1017
f8f7c533
JW
1018 s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
1019 if (s != sizeof(mq)) {
fed699f9
JW
1020 return VIRTIO_NET_ERR;
1021 }
1022
1023 if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
1024 return VIRTIO_NET_ERR;
1025 }
1026
1399c60d 1027 queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);
fed699f9 1028
f8f7c533
JW
1029 if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
1030 queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
1031 queues > n->max_queues ||
fed699f9
JW
1032 !n->multiqueue) {
1033 return VIRTIO_NET_ERR;
1034 }
1035
f8f7c533 1036 n->curr_queues = queues;
fed699f9
JW
1037 /* stop the backend before changing the number of queues to avoid handling a
1038 * disabled queue */
17a0ca55 1039 virtio_net_set_status(vdev, vdev->status);
fed699f9
JW
1040 virtio_net_set_queues(n);
1041
1042 return VIRTIO_NET_OK;
1043}
ba7eadb5 1044
3d11d36c
AL
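/* Each control-queue element carries a virtio_net_ctrl_hdr plus
 * command-specific data in its out buffers; a one-byte ack status is
 * written back into its in buffer. */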
1045static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
1046{
17a0ca55 1047 VirtIONet *n = VIRTIO_NET(vdev);
3d11d36c
AL
1048 struct virtio_net_ctrl_hdr ctrl;
1049 virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
51b19ebe 1050 VirtQueueElement *elem;
921ac5d0 1051 size_t s;
771b6ed3 1052 struct iovec *iov, *iov2;
921ac5d0 1053 unsigned int iov_cnt;
3d11d36c 1054
51b19ebe
PB
1055 for (;;) {
1056 elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
1057 if (!elem) {
1058 break;
1059 }
1060 if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
1061 iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
ba7eadb5
GK
1062 virtio_error(vdev, "virtio-net ctrl missing headers");
1063 virtqueue_detach_element(vq, elem, 0);
1064 g_free(elem);
1065 break;
3d11d36c
AL
1066 }
1067
51b19ebe
PB
1068 iov_cnt = elem->out_num;
1069 iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
921ac5d0
MT
1070 s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
1071 iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
1072 if (s != sizeof(ctrl)) {
1073 status = VIRTIO_NET_ERR;
dd23454b 1074 } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
921ac5d0
MT
1075 status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
1076 } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
1077 status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
1078 } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
1079 status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
f57fcf70
JW
1080 } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
1081 status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
fed699f9 1082 } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
f8f7c533 1083 status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
644c9858
DF
1084 } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
1085 status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
3d11d36c
AL
1086 }
1087
51b19ebe 1088 s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
921ac5d0 1089 assert(s == sizeof(status));
3d11d36c 1090
51b19ebe 1091 virtqueue_push(vq, elem, sizeof(status));
3d11d36c 1092 virtio_notify(vdev, vq);
771b6ed3 1093 g_free(iov2);
51b19ebe 1094 g_free(elem);
3d11d36c
AL
1095 }
1096}
1097
fbe78f4f
AL
1098/* RX */
1099
1100static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
1101{
17a0ca55 1102 VirtIONet *n = VIRTIO_NET(vdev);
fed699f9 1103 int queue_index = vq2q(virtio_get_queue_index(vq));
8aeff62d 1104
fed699f9 1105 qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
fbe78f4f
AL
1106}
1107
4e68f7a0 1108static int virtio_net_can_receive(NetClientState *nc)
fbe78f4f 1109{
cc1f0f45 1110 VirtIONet *n = qemu_get_nic_opaque(nc);
17a0ca55 1111 VirtIODevice *vdev = VIRTIO_DEVICE(n);
fed699f9 1112 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
0c87e93e 1113
17a0ca55 1114 if (!vdev->vm_running) {
95477323
MT
1115 return 0;
1116 }
cdd5cc12 1117
fed699f9
JW
1118 if (nc->queue_index >= n->curr_queues) {
1119 return 0;
1120 }
1121
0c87e93e 1122 if (!virtio_queue_ready(q->rx_vq) ||
17a0ca55 1123 !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
fbe78f4f 1124 return 0;
0c87e93e 1125 }
fbe78f4f 1126
cdd5cc12
MM
1127 return 1;
1128}
1129
0c87e93e 1130static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
cdd5cc12 1131{
0c87e93e
JW
1132 VirtIONet *n = q->n;
1133 if (virtio_queue_empty(q->rx_vq) ||
fbe78f4f 1134 (n->mergeable_rx_bufs &&
0c87e93e
JW
1135 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
1136 virtio_queue_set_notification(q->rx_vq, 1);
06b12970
TL
1137
1138 /* To avoid a race condition where the guest has made some buffers
1139 * available after the above check but before notification was
1140 * enabled, check for available buffers again.
1141 */
0c87e93e 1142 if (virtio_queue_empty(q->rx_vq) ||
06b12970 1143 (n->mergeable_rx_bufs &&
0c87e93e 1144 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
06b12970 1145 return 0;
0c87e93e 1146 }
fbe78f4f
AL
1147 }
1148
0c87e93e 1149 virtio_queue_set_notification(q->rx_vq, 0);
fbe78f4f
AL
1150 return 1;
1151}
1152
1399c60d 1153static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
032a74a1 1154{
1399c60d
RR
1155 virtio_tswap16s(vdev, &hdr->hdr_len);
1156 virtio_tswap16s(vdev, &hdr->gso_size);
1157 virtio_tswap16s(vdev, &hdr->csum_start);
1158 virtio_tswap16s(vdev, &hdr->csum_offset);
032a74a1
CLG
1159}
1160
1d41b0c1
AL
1161/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
1162 * it never finds out that the packets don't have valid checksums. This
1163 * causes dhclient to get upset. Fedora's carried a patch for ages to
1164 * fix this with Xen but it hasn't appeared in an upstream release of
1165 * dhclient yet.
1166 *
1167 * To avoid breaking existing guests, we catch udp packets and add
1168 * checksums. This is terrible but it's better than hacking the guest
1169 * kernels.
1170 *
1171 * N.B. if we introduce a zero-copy API, this operation is no longer free so
1172 * we should provide a mechanism to disable it to avoid polluting the host
1173 * cache.
1174 */
1175static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
22cc84db 1176 uint8_t *buf, size_t size)
1d41b0c1
AL
1177{
1178 if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
1179 (size > 27 && size < 1500) && /* normal sized MTU */
1180 (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
1181 (buf[23] == 17) && /* ip.protocol == UDP */
1182 (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
22cc84db 1183 net_checksum_calculate(buf, size);
1d41b0c1
AL
1184 hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
1185 }
1186}
1187
280598b7
MT
1188static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
1189 const void *buf, size_t size)
fbe78f4f 1190{
3a330134 1191 if (n->has_vnet_hdr) {
22cc84db
MT
1192 /* FIXME this cast is evil */
1193 void *wbuf = (void *)buf;
280598b7
MT
1194 work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
1195 size - n->host_hdr_len);
1bfa316c
GK
1196
1197 if (n->needs_vnet_hdr_swap) {
1198 virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
1199 }
280598b7 1200 iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
22cc84db
MT
1201 } else {
1202 struct virtio_net_hdr hdr = {
1203 .flags = 0,
1204 .gso_type = VIRTIO_NET_HDR_GSO_NONE
1205 };
1206 iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
3a330134 1207 }
fbe78f4f
AL
1208}
1209
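/* Return 1 if the packet passes the promiscuous/VLAN/MAC filters and should
 * be delivered to the guest, 0 if it should be dropped. */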
3831ab20
AL
1210static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
1211{
1212 static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
f21c0ed9 1213 static const uint8_t vlan[] = {0x81, 0x00};
3831ab20 1214 uint8_t *ptr = (uint8_t *)buf;
b6503ed9 1215 int i;
3831ab20
AL
1216
1217 if (n->promisc)
1218 return 1;
1219
e043ebc6 1220 ptr += n->host_hdr_len;
3a330134 1221
f21c0ed9 1222 if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
7542d3e7 1223 int vid = lduw_be_p(ptr + 14) & 0xfff;
f21c0ed9
AL
1224 if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
1225 return 0;
1226 }
1227
bbe2f399
AW
1228 if (ptr[0] & 1) { // multicast
1229 if (!memcmp(ptr, bcast, sizeof(bcast))) {
015cb166
AW
1230 return !n->nobcast;
1231 } else if (n->nomulti) {
1232 return 0;
8fd2a2f1 1233 } else if (n->allmulti || n->mac_table.multi_overflow) {
bbe2f399
AW
1234 return 1;
1235 }
2d9aba39
AW
1236
1237 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
1238 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1239 return 1;
1240 }
1241 }
bbe2f399 1242 } else { // unicast
015cb166
AW
1243 if (n->nouni) {
1244 return 0;
1245 } else if (n->alluni || n->mac_table.uni_overflow) {
8fd2a2f1
AW
1246 return 1;
1247 } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
bbe2f399
AW
1248 return 1;
1249 }
3831ab20 1250
2d9aba39
AW
1251 for (i = 0; i < n->mac_table.first_multi; i++) {
1252 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1253 return 1;
1254 }
1255 }
b6503ed9
AL
1256 }
1257
3831ab20
AL
1258 return 0;
1259}
1260
97cd965c
PB
1261static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
1262 size_t size)
fbe78f4f 1263{
cc1f0f45 1264 VirtIONet *n = qemu_get_nic_opaque(nc);
fed699f9 1265 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
17a0ca55 1266 VirtIODevice *vdev = VIRTIO_DEVICE(n);
63c58728
MT
1267 struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
1268 struct virtio_net_hdr_mrg_rxbuf mhdr;
1269 unsigned mhdr_cnt = 0;
22cc84db 1270 size_t offset, i, guest_offset;
fbe78f4f 1271
fed699f9 1272 if (!virtio_net_can_receive(nc)) {
cdd5cc12 1273 return -1;
b356f76d 1274 }
cdd5cc12 1275
940cda94 1276 /* hdr_len refers to the header we supply to the guest */
0c87e93e 1277 if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
8aeff62d 1278 return 0;
0c87e93e 1279 }
fbe78f4f 1280
3831ab20 1281 if (!receive_filter(n, buf, size))
4f1c942b 1282 return size;
3831ab20 1283
fbe78f4f
AL
1284 offset = i = 0;
1285
1286 while (offset < size) {
51b19ebe 1287 VirtQueueElement *elem;
fbe78f4f 1288 int len, total;
51b19ebe 1289 const struct iovec *sg;
fbe78f4f 1290
22c253d9 1291 total = 0;
fbe78f4f 1292
51b19ebe
PB
1293 elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
1294 if (!elem) {
ba10b9c0
GK
1295 if (i) {
1296 virtio_error(vdev, "virtio-net unexpected empty queue: "
1297 "i %zd mergeable %d offset %zd, size %zd, "
1298 "guest hdr len %zd, host hdr len %zd "
1299 "guest features 0x%" PRIx64,
1300 i, n->mergeable_rx_bufs, offset, size,
1301 n->guest_hdr_len, n->host_hdr_len,
1302 vdev->guest_features);
1303 }
1304 return -1;
fbe78f4f
AL
1305 }
1306
51b19ebe 1307 if (elem->in_num < 1) {
ba10b9c0
GK
1308 virtio_error(vdev,
1309 "virtio-net receive queue contains no in buffers");
1310 virtqueue_detach_element(q->rx_vq, elem, 0);
1311 g_free(elem);
1312 return -1;
fbe78f4f
AL
1313 }
1314
51b19ebe 1315 sg = elem->in_sg;
fbe78f4f 1316 if (i == 0) {
c8d28e7e 1317 assert(offset == 0);
63c58728
MT
1318 if (n->mergeable_rx_bufs) {
1319 mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
51b19ebe 1320 sg, elem->in_num,
63c58728
MT
1321 offsetof(typeof(mhdr), num_buffers),
1322 sizeof(mhdr.num_buffers));
1323 }
fbe78f4f 1324
51b19ebe 1325 receive_header(n, sg, elem->in_num, buf, size);
c8d28e7e 1326 offset = n->host_hdr_len;
e35e23f6 1327 total += n->guest_hdr_len;
22cc84db
MT
1328 guest_offset = n->guest_hdr_len;
1329 } else {
1330 guest_offset = 0;
fbe78f4f
AL
1331 }
1332
1333 /* copy in packet. ugh */
51b19ebe 1334 len = iov_from_buf(sg, elem->in_num, guest_offset,
dcf6f5e1 1335 buf + offset, size - offset);
fbe78f4f 1336 total += len;
279a4253
MT
1337 offset += len;
1338 /* If buffers can't be merged, at this point we
1339 * must have consumed the complete packet.
1340 * Otherwise, drop it. */
1341 if (!n->mergeable_rx_bufs && offset < size) {
27e57efe 1342 virtqueue_unpop(q->rx_vq, elem, total);
51b19ebe 1343 g_free(elem);
279a4253
MT
1344 return size;
1345 }
fbe78f4f
AL
1346
1347 /* signal other side */
51b19ebe
PB
1348 virtqueue_fill(q->rx_vq, elem, total, i++);
1349 g_free(elem);
fbe78f4f
AL
1350 }
1351
63c58728 1352 if (mhdr_cnt) {
1399c60d 1353 virtio_stw_p(vdev, &mhdr.num_buffers, i);
63c58728
MT
1354 iov_from_buf(mhdr_sg, mhdr_cnt,
1355 0,
1356 &mhdr.num_buffers, sizeof mhdr.num_buffers);
44b15bc5 1357 }
fbe78f4f 1358
0c87e93e 1359 virtqueue_flush(q->rx_vq, i);
17a0ca55 1360 virtio_notify(vdev, q->rx_vq);
4f1c942b
MM
1361
1362 return size;
fbe78f4f
AL
1363}
1364
2974e916 1365static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
97cd965c
PB
1366 size_t size)
1367{
1368 ssize_t r;
1369
1370 rcu_read_lock();
1371 r = virtio_net_receive_rcu(nc, buf, size);
1372 rcu_read_unlock();
1373 return r;
1374}
1375
2974e916
YB
1376static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
1377 const uint8_t *buf,
1378 VirtioNetRscUnit *unit)
1379{
1380 uint16_t ip_hdrlen;
1381 struct ip_header *ip;
1382
1383 ip = (struct ip_header *)(buf + chain->n->guest_hdr_len
1384 + sizeof(struct eth_header));
1385 unit->ip = (void *)ip;
1386 ip_hdrlen = (ip->ip_ver_len & 0xF) << 2;
1387 unit->ip_plen = &ip->ip_len;
1388 unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip) + ip_hdrlen);
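    /* The data offset lives in the top 4 bits of th_offset_flags, counted in
     * 32-bit words: (x & 0xF000) >> 12 words, << 2 for bytes, hence >> 10. */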
1389 unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;
1390 unit->payload = htons(*unit->ip_plen) - ip_hdrlen - unit->tcp_hdrlen;
1391}
1392
1393static void virtio_net_rsc_extract_unit6(VirtioNetRscChain *chain,
1394 const uint8_t *buf,
1395 VirtioNetRscUnit *unit)
1396{
1397 struct ip6_header *ip6;
1398
1399 ip6 = (struct ip6_header *)(buf + chain->n->guest_hdr_len
1400 + sizeof(struct eth_header));
1401 unit->ip = ip6;
1402 unit->ip_plen = &(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
1403 unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip)\
1404 + sizeof(struct ip6_header));
1405 unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;
1406
1407 /* There is a difference between payload length in ipv4 and v6:
1408 the ip header is excluded in ipv6 */
1409 unit->payload = htons(*unit->ip_plen) - unit->tcp_hdrlen;
1410}
1411
1412static size_t virtio_net_rsc_drain_seg(VirtioNetRscChain *chain,
1413 VirtioNetRscSeg *seg)
1414{
1415 int ret;
1416 struct virtio_net_hdr *h;
1417
1418 h = (struct virtio_net_hdr *)seg->buf;
1419 h->flags = 0;
1420 h->gso_type = VIRTIO_NET_HDR_GSO_NONE;
1421
1422 if (seg->is_coalesced) {
1423 *virtio_net_rsc_ext_num_packets(h) = seg->packets;
1424 *virtio_net_rsc_ext_num_dupacks(h) = seg->dup_ack;
1425 h->flags = VIRTIO_NET_HDR_F_RSC_INFO;
1426 if (chain->proto == ETH_P_IP) {
1427 h->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1428 } else {
1429 h->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1430 }
1431 }
1432
1433 ret = virtio_net_do_receive(seg->nc, seg->buf, seg->size);
1434 QTAILQ_REMOVE(&chain->buffers, seg, next);
1435 g_free(seg->buf);
1436 g_free(seg);
1437
1438 return ret;
1439}
1440
1441static void virtio_net_rsc_purge(void *opq)
1442{
1443 VirtioNetRscSeg *seg, *rn;
1444 VirtioNetRscChain *chain = (VirtioNetRscChain *)opq;
1445
1446 QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn) {
1447 if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
1448 chain->stat.purge_failed++;
1449 continue;
1450 }
1451 }
1452
1453 chain->stat.timer++;
1454 if (!QTAILQ_EMPTY(&chain->buffers)) {
1455 timer_mod(chain->drain_timer,
1456 qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout);
1457 }
1458}
1459
1460static void virtio_net_rsc_cleanup(VirtIONet *n)
1461{
1462 VirtioNetRscChain *chain, *rn_chain;
1463 VirtioNetRscSeg *seg, *rn_seg;
1464
1465 QTAILQ_FOREACH_SAFE(chain, &n->rsc_chains, next, rn_chain) {
1466 QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn_seg) {
1467 QTAILQ_REMOVE(&chain->buffers, seg, next);
1468 g_free(seg->buf);
1469 g_free(seg);
1470 }
1471
1472 timer_del(chain->drain_timer);
1473 timer_free(chain->drain_timer);
1474 QTAILQ_REMOVE(&n->rsc_chains, chain, next);
1475 g_free(chain);
1476 }
1477}
1478
1479static void virtio_net_rsc_cache_buf(VirtioNetRscChain *chain,
1480 NetClientState *nc,
1481 const uint8_t *buf, size_t size)
1482{
1483 uint16_t hdr_len;
1484 VirtioNetRscSeg *seg;
1485
1486 hdr_len = chain->n->guest_hdr_len;
1487 seg = g_malloc(sizeof(VirtioNetRscSeg));
1488 seg->buf = g_malloc(hdr_len + sizeof(struct eth_header)
1489 + sizeof(struct ip6_header) + VIRTIO_NET_MAX_TCP_PAYLOAD);
1490 memcpy(seg->buf, buf, size);
1491 seg->size = size;
1492 seg->packets = 1;
1493 seg->dup_ack = 0;
1494 seg->is_coalesced = 0;
1495 seg->nc = nc;
1496
1497 QTAILQ_INSERT_TAIL(&chain->buffers, seg, next);
1498 chain->stat.cache++;
1499
1500 switch (chain->proto) {
1501 case ETH_P_IP:
1502 virtio_net_rsc_extract_unit4(chain, seg->buf, &seg->unit);
1503 break;
1504 case ETH_P_IPV6:
1505 virtio_net_rsc_extract_unit6(chain, seg->buf, &seg->unit);
1506 break;
1507 default:
1508 g_assert_not_reached();
1509 }
1510}
1511
1512static int32_t virtio_net_rsc_handle_ack(VirtioNetRscChain *chain,
1513 VirtioNetRscSeg *seg,
1514 const uint8_t *buf,
1515 struct tcp_header *n_tcp,
1516 struct tcp_header *o_tcp)
1517{
1518 uint32_t nack, oack;
1519 uint16_t nwin, owin;
1520
1521 nack = htonl(n_tcp->th_ack);
1522 nwin = htons(n_tcp->th_win);
1523 oack = htonl(o_tcp->th_ack);
1524 owin = htons(o_tcp->th_win);
1525
1526 if ((nack - oack) >= VIRTIO_NET_MAX_TCP_PAYLOAD) {
1527 chain->stat.ack_out_of_win++;
1528 return RSC_FINAL;
1529 } else if (nack == oack) {
1530 /* duplicated ack or window probe */
1531 if (nwin == owin) {
1532 /* duplicated ack; bump the dup ack count (the whql test expects up to 1) */
1533 chain->stat.dup_ack++;
1534 return RSC_FINAL;
1535 } else {
1536 /* Coalesce window update */
1537 o_tcp->th_win = n_tcp->th_win;
1538 chain->stat.win_update++;
1539 return RSC_COALESCE;
1540 }
1541 } else {
1542 /* pure ack, go to 'C', finalize*/
1543 chain->stat.pure_ack++;
1544 return RSC_FINAL;
1545 }
1546}
1547
1548static int32_t virtio_net_rsc_coalesce_data(VirtioNetRscChain *chain,
1549 VirtioNetRscSeg *seg,
1550 const uint8_t *buf,
1551 VirtioNetRscUnit *n_unit)
1552{
1553 void *data;
1554 uint16_t o_ip_len;
1555 uint32_t nseq, oseq;
1556 VirtioNetRscUnit *o_unit;
1557
1558 o_unit = &seg->unit;
1559 o_ip_len = htons(*o_unit->ip_plen);
1560 nseq = htonl(n_unit->tcp->th_seq);
1561 oseq = htonl(o_unit->tcp->th_seq);
1562
1563 /* out of order or retransmitted. */
1564 if ((nseq - oseq) > VIRTIO_NET_MAX_TCP_PAYLOAD) {
1565 chain->stat.data_out_of_win++;
1566 return RSC_FINAL;
1567 }
1568
1569 data = ((uint8_t *)n_unit->tcp) + n_unit->tcp_hdrlen;
1570 if (nseq == oseq) {
1571 if ((o_unit->payload == 0) && n_unit->payload) {
1572 /* From no payload to payload: the normal case, not a dup ack etc. */
1573 chain->stat.data_after_pure_ack++;
1574 goto coalesce;
1575 } else {
1576 return virtio_net_rsc_handle_ack(chain, seg, buf,
1577 n_unit->tcp, o_unit->tcp);
1578 }
1579 } else if ((nseq - oseq) != o_unit->payload) {
1580 /* Not a consistent packet, out of order */
1581 chain->stat.data_out_of_order++;
1582 return RSC_FINAL;
1583 } else {
1584coalesce:
1585 if ((o_ip_len + n_unit->payload) > chain->max_payload) {
1586 chain->stat.over_size++;
1587 return RSC_FINAL;
1588 }
1589
1590 /* The data is in order; the payload length field differs between v4 and v6,
1591 so use the field value to update and record the new data len */
1592 o_unit->payload += n_unit->payload; /* update new data len */
1593
1594 /* update field in ip header */
1595 *o_unit->ip_plen = htons(o_ip_len + n_unit->payload);
1596
1597 /* Take over the 'PUSH' flag; the whql test guide says 'PUSH' can be coalesced
1598 for a windows guest, while this may change the behavior for a linux
1599 guest (only if it uses the RSC feature). */
1600 o_unit->tcp->th_offset_flags = n_unit->tcp->th_offset_flags;
1601
1602 o_unit->tcp->th_ack = n_unit->tcp->th_ack;
1603 o_unit->tcp->th_win = n_unit->tcp->th_win;
1604
1605 memmove(seg->buf + seg->size, data, n_unit->payload);
1606 seg->size += n_unit->payload;
1607 seg->packets++;
1608 chain->stat.coalesced++;
1609 return RSC_COALESCE;
1610 }
1611}
1612
1613static int32_t virtio_net_rsc_coalesce4(VirtioNetRscChain *chain,
1614 VirtioNetRscSeg *seg,
1615 const uint8_t *buf, size_t size,
1616 VirtioNetRscUnit *unit)
1617{
1618 struct ip_header *ip1, *ip2;
1619
1620 ip1 = (struct ip_header *)(unit->ip);
1621 ip2 = (struct ip_header *)(seg->unit.ip);
1622 if ((ip1->ip_src ^ ip2->ip_src) || (ip1->ip_dst ^ ip2->ip_dst)
1623 || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
1624 || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
1625 chain->stat.no_match++;
1626 return RSC_NO_MATCH;
1627 }
1628
1629 return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
1630}
1631
1632static int32_t virtio_net_rsc_coalesce6(VirtioNetRscChain *chain,
1633 VirtioNetRscSeg *seg,
1634 const uint8_t *buf, size_t size,
1635 VirtioNetRscUnit *unit)
1636{
1637 struct ip6_header *ip1, *ip2;
1638
1639 ip1 = (struct ip6_header *)(unit->ip);
1640 ip2 = (struct ip6_header *)(seg->unit.ip);
1641 if (memcmp(&ip1->ip6_src, &ip2->ip6_src, sizeof(struct in6_address))
1642 || memcmp(&ip1->ip6_dst, &ip2->ip6_dst, sizeof(struct in6_address))
1643 || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
1644 || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
1645 chain->stat.no_match++;
1646 return RSC_NO_MATCH;
1647 }
1648
1649 return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
1650}
1651
1652/* Packets with 'SYN' should bypass; packets with other control flags should be
1653 * sent only after a drain, to prevent reordering */
1654static int virtio_net_rsc_tcp_ctrl_check(VirtioNetRscChain *chain,
1655 struct tcp_header *tcp)
1656{
1657 uint16_t tcp_hdr;
1658 uint16_t tcp_flag;
1659
1660 tcp_flag = htons(tcp->th_offset_flags);
1661 tcp_hdr = (tcp_flag & VIRTIO_NET_TCP_HDR_LENGTH) >> 10;
1662 tcp_flag &= VIRTIO_NET_TCP_FLAG;
1663 tcp_flag = htons(tcp->th_offset_flags) & 0x3F;
1664 if (tcp_flag & TH_SYN) {
1665 chain->stat.tcp_syn++;
1666 return RSC_BYPASS;
1667 }
1668
1669 if (tcp_flag & (TH_FIN | TH_URG | TH_RST | TH_ECE | TH_CWR)) {
1670 chain->stat.tcp_ctrl_drain++;
1671 return RSC_FINAL;
1672 }
1673
1674 if (tcp_hdr > sizeof(struct tcp_header)) {
1675 chain->stat.tcp_all_opt++;
1676 return RSC_FINAL;
1677 }
1678
1679 return RSC_CANDIDATE;
1680}
1681
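/* Coalescing entry point for a sanity-checked TCP segment: cache it when the
 * chain is empty, flush and forward when the matching flow must be finalized,
 * or merge it into the matching cached segment. */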
1682static size_t virtio_net_rsc_do_coalesce(VirtioNetRscChain *chain,
1683 NetClientState *nc,
1684 const uint8_t *buf, size_t size,
1685 VirtioNetRscUnit *unit)
1686{
1687 int ret;
1688 VirtioNetRscSeg *seg, *nseg;
1689
1690 if (QTAILQ_EMPTY(&chain->buffers)) {
1691 chain->stat.empty_cache++;
1692 virtio_net_rsc_cache_buf(chain, nc, buf, size);
1693 timer_mod(chain->drain_timer,
1694 qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout);
1695 return size;
1696 }
1697
1698 QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) {
1699 if (chain->proto == ETH_P_IP) {
1700 ret = virtio_net_rsc_coalesce4(chain, seg, buf, size, unit);
1701 } else {
1702 ret = virtio_net_rsc_coalesce6(chain, seg, buf, size, unit);
1703 }
1704
1705 if (ret == RSC_FINAL) {
1706 if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
1707 /* Send failed */
1708 chain->stat.final_failed++;
1709 return 0;
1710 }
1711
1712 /* Send current packet */
1713 return virtio_net_do_receive(nc, buf, size);
1714 } else if (ret == RSC_NO_MATCH) {
1715 continue;
1716 } else {
1717 /* Coalesced; mark the coalesced flag so the cksum gets recalculated for ipv4 */
1718 seg->is_coalesced = 1;
1719 return size;
1720 }
1721 }
1722
1723 chain->stat.no_match_cache++;
1724 virtio_net_rsc_cache_buf(chain, nc, buf, size);
1725 return size;
1726}
1727
1728/* Drain a connection's data; this is to avoid out-of-order segments */
1729static size_t virtio_net_rsc_drain_flow(VirtioNetRscChain *chain,
1730 NetClientState *nc,
1731 const uint8_t *buf, size_t size,
1732 uint16_t ip_start, uint16_t ip_size,
1733 uint16_t tcp_port)
1734{
1735 VirtioNetRscSeg *seg, *nseg;
1736 uint32_t ppair1, ppair2;
1737
1738 ppair1 = *(uint32_t *)(buf + tcp_port);
1739 QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) {
1740 ppair2 = *(uint32_t *)(seg->buf + tcp_port);
1741 if (memcmp(buf + ip_start, seg->buf + ip_start, ip_size)
1742 || (ppair1 != ppair2)) {
1743 continue;
1744 }
1745 if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
1746 chain->stat.drain_failed++;
1747 }
1748
1749 break;
1750 }
1751
1752 return virtio_net_do_receive(nc, buf, size);
1753}
1754
1755static int32_t virtio_net_rsc_sanity_check4(VirtioNetRscChain *chain,
1756 struct ip_header *ip,
1757 const uint8_t *buf, size_t size)
1758{
1759 uint16_t ip_len;
1760
1761 /* Not an ipv4 packet */
1762 if (((ip->ip_ver_len & 0xF0) >> 4) != IP_HEADER_VERSION_4) {
1763 chain->stat.ip_option++;
1764 return RSC_BYPASS;
1765 }
1766
1767 /* Don't handle packets with ip option */
1768 if ((ip->ip_ver_len & 0xF) != VIRTIO_NET_IP4_HEADER_LENGTH) {
1769 chain->stat.ip_option++;
1770 return RSC_BYPASS;
1771 }
1772
1773 if (ip->ip_p != IPPROTO_TCP) {
1774 chain->stat.bypass_not_tcp++;
1775 return RSC_BYPASS;
1776 }
1777
1778 /* Don't handle packets with ip fragment */
1779 if (!(htons(ip->ip_off) & IP_DF)) {
1780 chain->stat.ip_frag++;
1781 return RSC_BYPASS;
1782 }
1783
1784 /* Don't handle packets with ecn flag */
1785 if (IPTOS_ECN(ip->ip_tos)) {
1786 chain->stat.ip_ecn++;
1787 return RSC_BYPASS;
1788 }
1789
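    /* The IP total length must cover at least the IP + TCP headers and must
     * not claim more data than the frame actually carries. */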
1790 ip_len = htons(ip->ip_len);
1791 if (ip_len < (sizeof(struct ip_header) + sizeof(struct tcp_header))
1792 || ip_len > (size - chain->n->guest_hdr_len -
1793 sizeof(struct eth_header))) {
1794 chain->stat.ip_hacked++;
1795 return RSC_BYPASS;
1796 }
1797
1798 return RSC_CANDIDATE;
1799}
1800
1801static size_t virtio_net_rsc_receive4(VirtioNetRscChain *chain,
1802 NetClientState *nc,
1803 const uint8_t *buf, size_t size)
1804{
1805 int32_t ret;
1806 uint16_t hdr_len;
1807 VirtioNetRscUnit unit;
1808
1809 hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;
1810
1811 if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header)
1812 + sizeof(struct tcp_header))) {
1813 chain->stat.bypass_not_tcp++;
1814 return virtio_net_do_receive(nc, buf, size);
1815 }
1816
1817 virtio_net_rsc_extract_unit4(chain, buf, &unit);
1818 if (virtio_net_rsc_sanity_check4(chain, unit.ip, buf, size)
1819 != RSC_CANDIDATE) {
1820 return virtio_net_do_receive(nc, buf, size);
1821 }
1822
1823 ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp);
1824 if (ret == RSC_BYPASS) {
1825 return virtio_net_do_receive(nc, buf, size);
1826 } else if (ret == RSC_FINAL) {
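        /* ip_start points at the IPv4 saddr (12 bytes into the IP header);
         * the 8-byte span covers saddr + daddr, and tcp_port is the offset
         * of the TCP header, i.e. of its port pair. */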
1827 return virtio_net_rsc_drain_flow(chain, nc, buf, size,
1828 ((hdr_len + sizeof(struct eth_header)) + 12),
1829 VIRTIO_NET_IP4_ADDR_SIZE,
1830 hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header));
1831 }
1832
1833 return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit);
1834}
1835
1836static int32_t virtio_net_rsc_sanity_check6(VirtioNetRscChain *chain,
1837 struct ip6_header *ip6,
1838 const uint8_t *buf, size_t size)
1839{
1840 uint16_t ip_len;
1841
1842 if (((ip6->ip6_ctlun.ip6_un1.ip6_un1_flow & 0xF0) >> 4)
1843 != IP_HEADER_VERSION_6) {
1844 return RSC_BYPASS;
1845 }
1846
1847    /* Both extension headers and the protocol are checked by this single test */
1848 if (ip6->ip6_ctlun.ip6_un1.ip6_un1_nxt != IPPROTO_TCP) {
1849 chain->stat.bypass_not_tcp++;
1850 return RSC_BYPASS;
1851 }
1852
1853 ip_len = htons(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
1854 if (ip_len < sizeof(struct tcp_header) ||
1855 ip_len > (size - chain->n->guest_hdr_len - sizeof(struct eth_header)
1856 - sizeof(struct ip6_header))) {
1857 chain->stat.ip_hacked++;
1858 return RSC_BYPASS;
1859 }
1860
1861 /* Don't handle packets with ecn flag */
1862 if (IP6_ECN(ip6->ip6_ctlun.ip6_un3.ip6_un3_ecn)) {
1863 chain->stat.ip_ecn++;
1864 return RSC_BYPASS;
1865 }
1866
1867 return RSC_CANDIDATE;
1868}
1869
1870static size_t virtio_net_rsc_receive6(void *opq, NetClientState *nc,
1871 const uint8_t *buf, size_t size)
1872{
1873 int32_t ret;
1874 uint16_t hdr_len;
1875 VirtioNetRscChain *chain;
1876 VirtioNetRscUnit unit;
1877
1878 chain = (VirtioNetRscChain *)opq;
1879 hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;
1880
1881 if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip6_header)
1882        + sizeof(struct tcp_header))) {
1883 return virtio_net_do_receive(nc, buf, size);
1884 }
1885
1886 virtio_net_rsc_extract_unit6(chain, buf, &unit);
1887 if (RSC_CANDIDATE != virtio_net_rsc_sanity_check6(chain,
1888 unit.ip, buf, size)) {
1889 return virtio_net_do_receive(nc, buf, size);
1890 }
1891
1892 ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp);
1893 if (ret == RSC_BYPASS) {
1894 return virtio_net_do_receive(nc, buf, size);
1895 } else if (ret == RSC_FINAL) {
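        /* The IPv6 saddr starts 8 bytes into the header (after version/flow,
         * payload length, next header and hop limit); 32 bytes spans
         * saddr + daddr. */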
1896 return virtio_net_rsc_drain_flow(chain, nc, buf, size,
1897 ((hdr_len + sizeof(struct eth_header)) + 8),
1898 VIRTIO_NET_IP6_ADDR_SIZE,
1899 hdr_len + sizeof(struct eth_header)
1900 + sizeof(struct ip6_header));
1901 }
1902
1903 return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit);
1904}
1905
1906static VirtioNetRscChain *virtio_net_rsc_lookup_chain(VirtIONet *n,
1907 NetClientState *nc,
1908 uint16_t proto)
1909{
1910 VirtioNetRscChain *chain;
1911
1912 if ((proto != (uint16_t)ETH_P_IP) && (proto != (uint16_t)ETH_P_IPV6)) {
1913 return NULL;
1914 }
1915
1916 QTAILQ_FOREACH(chain, &n->rsc_chains, next) {
1917 if (chain->proto == proto) {
1918 return chain;
1919 }
1920 }
1921
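    /* No chain for this protocol yet: create one lazily and link it into
     * the device's rsc_chains list for reuse by later packets. */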
1922 chain = g_malloc(sizeof(*chain));
1923 chain->n = n;
1924 chain->proto = proto;
1925 if (proto == (uint16_t)ETH_P_IP) {
1926 chain->max_payload = VIRTIO_NET_MAX_IP4_PAYLOAD;
1927 chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1928 } else {
1929 chain->max_payload = VIRTIO_NET_MAX_IP6_PAYLOAD;
1930 chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1931 }
1932 chain->drain_timer = timer_new_ns(QEMU_CLOCK_HOST,
1933 virtio_net_rsc_purge, chain);
1934 memset(&chain->stat, 0, sizeof(chain->stat));
1935
1936 QTAILQ_INIT(&chain->buffers);
1937 QTAILQ_INSERT_TAIL(&n->rsc_chains, chain, next);
1938
1939 return chain;
1940}
1941
1942static ssize_t virtio_net_rsc_receive(NetClientState *nc,
1943 const uint8_t *buf,
1944 size_t size)
1945{
1946 uint16_t proto;
1947 VirtioNetRscChain *chain;
1948 struct eth_header *eth;
1949 VirtIONet *n;
1950
1951 n = qemu_get_nic_opaque(nc);
1952 if (size < (n->host_hdr_len + sizeof(struct eth_header))) {
1953 return virtio_net_do_receive(nc, buf, size);
1954 }
1955
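    /* The ethernet header follows the virtio-net header; its ethertype
     * selects the IPv4 or IPv6 coalescing chain. */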
1956 eth = (struct eth_header *)(buf + n->guest_hdr_len);
1957 proto = htons(eth->h_proto);
1958
1959 chain = virtio_net_rsc_lookup_chain(n, nc, proto);
1960 if (chain) {
1961 chain->stat.received++;
1962 if (proto == (uint16_t)ETH_P_IP && n->rsc4_enabled) {
1963 return virtio_net_rsc_receive4(chain, nc, buf, size);
1964 } else if (proto == (uint16_t)ETH_P_IPV6 && n->rsc6_enabled) {
1965 return virtio_net_rsc_receive6(chain, nc, buf, size);
1966 }
1967 }
1968 return virtio_net_do_receive(nc, buf, size);
1969}
1970
1971static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
1972 size_t size)
1973{
1974 VirtIONet *n = qemu_get_nic_opaque(nc);
1975 if ((n->rsc4_enabled || n->rsc6_enabled)) {
1976 return virtio_net_rsc_receive(nc, buf, size);
1977 } else {
1978 return virtio_net_do_receive(nc, buf, size);
1979 }
1980}
1981
0c87e93e 1982static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
6243375f 1983
4e68f7a0 1984static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
6243375f 1985{
cc1f0f45 1986 VirtIONet *n = qemu_get_nic_opaque(nc);
fed699f9 1987 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
17a0ca55 1988 VirtIODevice *vdev = VIRTIO_DEVICE(n);
6243375f 1989
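    /* The async send completed: retire the pending element, then re-enable
     * notifications and flush anything that queued up meanwhile. */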
51b19ebe 1990 virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
17a0ca55 1991 virtio_notify(vdev, q->tx_vq);
6243375f 1992
51b19ebe
PB
1993 g_free(q->async_tx.elem);
1994 q->async_tx.elem = NULL;
6243375f 1995
0c87e93e
JW
1996 virtio_queue_set_notification(q->tx_vq, 1);
1997 virtio_net_flush_tx(q);
6243375f
MM
1998}
1999
fbe78f4f 2000/* TX */
0c87e93e 2001static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
fbe78f4f 2002{
0c87e93e 2003 VirtIONet *n = q->n;
17a0ca55 2004 VirtIODevice *vdev = VIRTIO_DEVICE(n);
51b19ebe 2005 VirtQueueElement *elem;
e3f30488 2006 int32_t num_packets = 0;
fed699f9 2007 int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
17a0ca55 2008 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
e3f30488
AW
2009 return num_packets;
2010 }
fbe78f4f 2011
51b19ebe 2012 if (q->async_tx.elem) {
0c87e93e 2013 virtio_queue_set_notification(q->tx_vq, 0);
e3f30488 2014 return num_packets;
6243375f
MM
2015 }
2016
51b19ebe 2017 for (;;) {
bd89dd98 2018 ssize_t ret;
51b19ebe
PB
2019 unsigned int out_num;
2020 struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
feb93f36 2021 struct virtio_net_hdr_mrg_rxbuf mhdr;
fbe78f4f 2022
51b19ebe
PB
2023 elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
2024 if (!elem) {
2025 break;
2026 }
2027
2028 out_num = elem->out_num;
2029 out_sg = elem->out_sg;
7b80d08e 2030 if (out_num < 1) {
fa5e56c2
GK
2031 virtio_error(vdev, "virtio-net header not in first element");
2032 virtqueue_detach_element(q->tx_vq, elem, 0);
2033 g_free(elem);
2034 return -EINVAL;
fbe78f4f
AL
2035 }
2036
032a74a1 2037 if (n->has_vnet_hdr) {
feb93f36
JW
2038 if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
2039 n->guest_hdr_len) {
fa5e56c2
GK
2040 virtio_error(vdev, "virtio-net header incorrect");
2041 virtqueue_detach_element(q->tx_vq, elem, 0);
2042 g_free(elem);
2043 return -EINVAL;
032a74a1 2044 }
1bfa316c 2045 if (n->needs_vnet_hdr_swap) {
feb93f36
JW
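            /* Prepend the byte-swapped header copy (mhdr) via sg2 and skip
             * the guest's original header bytes in the remaining iovecs. */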
2046 virtio_net_hdr_swap(vdev, (void *) &mhdr);
2047 sg2[0].iov_base = &mhdr;
2048 sg2[0].iov_len = n->guest_hdr_len;
2049 out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
2050 out_sg, out_num,
2051 n->guest_hdr_len, -1);
2052 if (out_num == VIRTQUEUE_MAX_SIZE) {
2053 goto drop;
7d37435b 2054 }
feb93f36
JW
2055 out_num += 1;
2056 out_sg = sg2;
7d37435b 2057 }
032a74a1 2058 }
14761f9c
MT
2059 /*
2060 * If host wants to see the guest header as is, we can
2061 * pass it on unchanged. Otherwise, copy just the parts
2062 * that host is interested in.
2063 */
2064 assert(n->host_hdr_len <= n->guest_hdr_len);
2065 if (n->host_hdr_len != n->guest_hdr_len) {
2066 unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
2067 out_sg, out_num,
2068 0, n->host_hdr_len);
2069 sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
2070 out_sg, out_num,
2071 n->guest_hdr_len, -1);
2072 out_num = sg_num;
2073 out_sg = sg;
fbe78f4f
AL
2074 }
2075
fed699f9
JW
2076 ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
2077 out_sg, out_num, virtio_net_tx_complete);
6243375f 2078 if (ret == 0) {
0c87e93e
JW
2079 virtio_queue_set_notification(q->tx_vq, 0);
2080 q->async_tx.elem = elem;
e3f30488 2081 return -EBUSY;
6243375f
MM
2082 }
2083
feb93f36 2084drop:
51b19ebe 2085 virtqueue_push(q->tx_vq, elem, 0);
17a0ca55 2086 virtio_notify(vdev, q->tx_vq);
51b19ebe 2087 g_free(elem);
e3f30488
AW
2088
2089 if (++num_packets >= n->tx_burst) {
2090 break;
2091 }
fbe78f4f 2092 }
e3f30488 2093 return num_packets;
fbe78f4f
AL
2094}
2095
a697a334 2096static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
fbe78f4f 2097{
17a0ca55 2098 VirtIONet *n = VIRTIO_NET(vdev);
fed699f9 2099 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
fbe78f4f 2100
283e2c2a
YB
2101 if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
2102 virtio_net_drop_tx_queue_data(vdev, vq);
2103 return;
2104 }
2105
783e7706 2106 /* This happens when device was stopped but VCPU wasn't. */
17a0ca55 2107 if (!vdev->vm_running) {
0c87e93e 2108 q->tx_waiting = 1;
783e7706
MT
2109 return;
2110 }
2111
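    /* The first kick arms the timer and masks further notifications so
     * packets batch up; a kick while already waiting flushes immediately. */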
0c87e93e 2112 if (q->tx_waiting) {
fbe78f4f 2113 virtio_queue_set_notification(vq, 1);
bc72ad67 2114 timer_del(q->tx_timer);
0c87e93e 2115 q->tx_waiting = 0;
fa5e56c2
GK
2116 if (virtio_net_flush_tx(q) == -EINVAL) {
2117 return;
2118 }
fbe78f4f 2119 } else {
bc72ad67
AB
2120 timer_mod(q->tx_timer,
2121 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
0c87e93e 2122 q->tx_waiting = 1;
fbe78f4f
AL
2123 virtio_queue_set_notification(vq, 0);
2124 }
2125}
2126
a697a334
AW
2127static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
2128{
17a0ca55 2129 VirtIONet *n = VIRTIO_NET(vdev);
fed699f9 2130 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
a697a334 2131
283e2c2a
YB
2132 if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
2133 virtio_net_drop_tx_queue_data(vdev, vq);
2134 return;
2135 }
2136
0c87e93e 2137 if (unlikely(q->tx_waiting)) {
a697a334
AW
2138 return;
2139 }
0c87e93e 2140 q->tx_waiting = 1;
783e7706 2141 /* This happens when device was stopped but VCPU wasn't. */
17a0ca55 2142 if (!vdev->vm_running) {
783e7706
MT
2143 return;
2144 }
a697a334 2145 virtio_queue_set_notification(vq, 0);
0c87e93e 2146 qemu_bh_schedule(q->tx_bh);
a697a334
AW
2147}
2148
fbe78f4f
AL
2149static void virtio_net_tx_timer(void *opaque)
2150{
0c87e93e
JW
2151 VirtIONetQueue *q = opaque;
2152 VirtIONet *n = q->n;
17a0ca55 2153 VirtIODevice *vdev = VIRTIO_DEVICE(n);
e8bcf842
MT
2154 /* This happens when device was stopped but BH wasn't. */
2155 if (!vdev->vm_running) {
2156 /* Make sure tx waiting is set, so we'll run when restarted. */
2157 assert(q->tx_waiting);
2158 return;
2159 }
fbe78f4f 2160
0c87e93e 2161 q->tx_waiting = 0;
fbe78f4f
AL
2162
2163    /* Just in case the driver is not ready anymore */
17a0ca55 2164 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
fbe78f4f 2165 return;
17a0ca55 2166 }
fbe78f4f 2167
0c87e93e
JW
2168 virtio_queue_set_notification(q->tx_vq, 1);
2169 virtio_net_flush_tx(q);
fbe78f4f
AL
2170}
2171
a697a334
AW
2172static void virtio_net_tx_bh(void *opaque)
2173{
0c87e93e
JW
2174 VirtIONetQueue *q = opaque;
2175 VirtIONet *n = q->n;
17a0ca55 2176 VirtIODevice *vdev = VIRTIO_DEVICE(n);
a697a334
AW
2177 int32_t ret;
2178
e8bcf842
MT
2179 /* This happens when device was stopped but BH wasn't. */
2180 if (!vdev->vm_running) {
2181 /* Make sure tx waiting is set, so we'll run when restarted. */
2182 assert(q->tx_waiting);
2183 return;
2184 }
783e7706 2185
0c87e93e 2186 q->tx_waiting = 0;
a697a334
AW
2187
2188    /* Just in case the driver is not ready anymore */
17a0ca55 2189 if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
a697a334 2190 return;
17a0ca55 2191 }
a697a334 2192
0c87e93e 2193 ret = virtio_net_flush_tx(q);
fa5e56c2
GK
2194 if (ret == -EBUSY || ret == -EINVAL) {
2195        return; /* Notification re-enable is handled by tx_complete,
2196                 * or the device is broken */
a697a334
AW
2197 }
2198
2199 /* If we flush a full burst of packets, assume there are
2200 * more coming and immediately reschedule */
2201 if (ret >= n->tx_burst) {
0c87e93e
JW
2202 qemu_bh_schedule(q->tx_bh);
2203 q->tx_waiting = 1;
a697a334
AW
2204 return;
2205 }
2206
2207 /* If less than a full burst, re-enable notification and flush
2208 * anything that may have come in while we weren't looking. If
2209 * we find something, assume the guest is still active and reschedule */
0c87e93e 2210 virtio_queue_set_notification(q->tx_vq, 1);
fa5e56c2
GK
2211 ret = virtio_net_flush_tx(q);
2212 if (ret == -EINVAL) {
2213 return;
2214 } else if (ret > 0) {
0c87e93e
JW
2215 virtio_queue_set_notification(q->tx_vq, 0);
2216 qemu_bh_schedule(q->tx_bh);
2217 q->tx_waiting = 1;
a697a334
AW
2218 }
2219}
2220
f9d6dbf0
WC
2221static void virtio_net_add_queue(VirtIONet *n, int index)
2222{
2223 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2224
1c0fbfa3
MT
2225 n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
2226 virtio_net_handle_rx);
9b02e161 2227
f9d6dbf0
WC
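    /* The tx path is driven either by a timer (tx=timer) or a bottom half
     * (tx=bh, the effective default), chosen by the "tx" property. */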
2228 if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
2229 n->vqs[index].tx_vq =
9b02e161
WW
2230 virtio_add_queue(vdev, n->net_conf.tx_queue_size,
2231 virtio_net_handle_tx_timer);
f9d6dbf0
WC
2232 n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2233 virtio_net_tx_timer,
2234 &n->vqs[index]);
2235 } else {
2236 n->vqs[index].tx_vq =
9b02e161
WW
2237 virtio_add_queue(vdev, n->net_conf.tx_queue_size,
2238 virtio_net_handle_tx_bh);
f9d6dbf0
WC
2239 n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
2240 }
2241
2242 n->vqs[index].tx_waiting = 0;
2243 n->vqs[index].n = n;
2244}
2245
2246static void virtio_net_del_queue(VirtIONet *n, int index)
2247{
2248 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2249 VirtIONetQueue *q = &n->vqs[index];
2250 NetClientState *nc = qemu_get_subqueue(n->nic, index);
2251
2252 qemu_purge_queued_packets(nc);
2253
2254 virtio_del_queue(vdev, index * 2);
2255 if (q->tx_timer) {
2256 timer_del(q->tx_timer);
2257 timer_free(q->tx_timer);
f989c30c 2258 q->tx_timer = NULL;
f9d6dbf0
WC
2259 } else {
2260 qemu_bh_delete(q->tx_bh);
f989c30c 2261 q->tx_bh = NULL;
f9d6dbf0 2262 }
f989c30c 2263 q->tx_waiting = 0;
f9d6dbf0
WC
2264 virtio_del_queue(vdev, index * 2 + 1);
2265}
2266
2267static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
2268{
2269 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2270 int old_num_queues = virtio_get_num_queues(vdev);
2271 int new_num_queues = new_max_queues * 2 + 1;
2272 int i;
2273
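    /* Each queue pair has an rx and a tx virtqueue, with the ctrl vq always
     * last, so the total is 2 * pairs + 1: odd and at least 3. */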
2274 assert(old_num_queues >= 3);
2275 assert(old_num_queues % 2 == 1);
2276
2277 if (old_num_queues == new_num_queues) {
2278 return;
2279 }
2280
2281 /*
2282 * We always need to remove and add ctrl vq if
2283 * old_num_queues != new_num_queues. Remove ctrl_vq first,
2284     * and then we only enter one of the following two loops.
2285 */
2286 virtio_del_queue(vdev, old_num_queues - 1);
2287
2288 for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
2289 /* new_num_queues < old_num_queues */
2290 virtio_net_del_queue(n, i / 2);
2291 }
2292
2293 for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
2294 /* new_num_queues > old_num_queues */
2295 virtio_net_add_queue(n, i / 2);
2296 }
2297
2298 /* add ctrl_vq last */
2299 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
2300}
2301
ec57db16 2302static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
fed699f9 2303{
f9d6dbf0
WC
2304 int max = multiqueue ? n->max_queues : 1;
2305
fed699f9 2306 n->multiqueue = multiqueue;
f9d6dbf0 2307 virtio_net_change_num_queues(n, max);
fed699f9 2308
fed699f9
JW
2309 virtio_net_set_queues(n);
2310}
2311
982b78c5 2312static int virtio_net_post_load_device(void *opaque, int version_id)
037dab2f 2313{
982b78c5
DDAG
2314 VirtIONet *n = opaque;
2315 VirtIODevice *vdev = VIRTIO_DEVICE(n);
037dab2f 2316 int i, link_down;
fbe78f4f 2317
9d8c6a25 2318 trace_virtio_net_post_load_device();
982b78c5 2319 virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
95129d6f
CH
2320 virtio_vdev_has_feature(vdev,
2321 VIRTIO_F_VERSION_1));
fbe78f4f 2322
76010cb3 2323 /* MAC_TABLE_ENTRIES may be different from the saved image */
982b78c5 2324 if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
76010cb3 2325 n->mac_table.in_use = 0;
b6503ed9 2326 }
0ce0e8f4 2327
982b78c5 2328 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
6c666823
MT
2329 n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
2330 }
2331
2332 if (peer_has_vnet_hdr(n)) {
2333 virtio_net_apply_guest_offloads(n);
2334 }
2335
5f800801
JW
2336 virtio_net_set_queues(n);
2337
2d9aba39
AW
2338 /* Find the first multicast entry in the saved MAC filter */
2339 for (i = 0; i < n->mac_table.in_use; i++) {
2340 if (n->mac_table.macs[i * ETH_ALEN] & 1) {
2341 break;
2342 }
2343 }
2344 n->mac_table.first_multi = i;
98991481
AK
2345
2346 /* nc.link_down can't be migrated, so infer link_down according
2347 * to link status bit in n->status */
5f800801
JW
2348 link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
2349 for (i = 0; i < n->max_queues; i++) {
2350 qemu_get_subqueue(n->nic, i)->link_down = link_down;
2351 }
98991481 2352
6c666823
MT
2353 if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
2354 virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
9d8c6a25
DDAG
2355 qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
2356 QEMU_CLOCK_VIRTUAL,
2357 virtio_net_announce_timer, n);
2358 if (n->announce_timer.round) {
2359 timer_mod(n->announce_timer.tm,
2360 qemu_clock_get_ms(n->announce_timer.type));
2361 } else {
2362 qemu_announce_timer_del(&n->announce_timer);
2363 }
6c666823
MT
2364 }
2365
fbe78f4f
AL
2366 return 0;
2367}
2368
982b78c5
DDAG
2369/* tx_waiting field of a VirtIONetQueue */
2370static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
2371 .name = "virtio-net-queue-tx_waiting",
2372 .fields = (VMStateField[]) {
2373 VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
2374 VMSTATE_END_OF_LIST()
2375 },
2376};
2377
2378static bool max_queues_gt_1(void *opaque, int version_id)
2379{
2380 return VIRTIO_NET(opaque)->max_queues > 1;
2381}
2382
2383static bool has_ctrl_guest_offloads(void *opaque, int version_id)
2384{
2385 return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
2386 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
2387}
2388
2389static bool mac_table_fits(void *opaque, int version_id)
2390{
2391 return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
2392}
2393
2394static bool mac_table_doesnt_fit(void *opaque, int version_id)
2395{
2396 return !mac_table_fits(opaque, version_id);
2397}
2398
2399/* This temporary type is shared by all the WITH_TMP methods
2400 * although only some fields are used by each.
2401 */
2402struct VirtIONetMigTmp {
2403 VirtIONet *parent;
2404 VirtIONetQueue *vqs_1;
2405 uint16_t curr_queues_1;
2406 uint8_t has_ufo;
2407 uint32_t has_vnet_hdr;
2408};
2409
2410/* The 2nd and subsequent tx_waiting flags are loaded later than
2411 * the 1st entry in the queues and only if there's more than one
2412 * entry. We use the tmp mechanism to calculate a temporary
2413 * pointer and count and also validate the count.
2414 */
2415
44b1ff31 2416static int virtio_net_tx_waiting_pre_save(void *opaque)
982b78c5
DDAG
2417{
2418 struct VirtIONetMigTmp *tmp = opaque;
2419
2420 tmp->vqs_1 = tmp->parent->vqs + 1;
2421 tmp->curr_queues_1 = tmp->parent->curr_queues - 1;
2422 if (tmp->parent->curr_queues == 0) {
2423 tmp->curr_queues_1 = 0;
2424 }
44b1ff31
DDAG
2425
2426 return 0;
982b78c5
DDAG
2427}
2428
2429static int virtio_net_tx_waiting_pre_load(void *opaque)
2430{
2431 struct VirtIONetMigTmp *tmp = opaque;
2432
2433 /* Reuse the pointer setup from save */
2434 virtio_net_tx_waiting_pre_save(opaque);
2435
2436 if (tmp->parent->curr_queues > tmp->parent->max_queues) {
2437 error_report("virtio-net: curr_queues %x > max_queues %x",
2438 tmp->parent->curr_queues, tmp->parent->max_queues);
2439
2440 return -EINVAL;
2441 }
2442
2443 return 0; /* all good */
2444}
2445
2446static const VMStateDescription vmstate_virtio_net_tx_waiting = {
2447 .name = "virtio-net-tx_waiting",
2448 .pre_load = virtio_net_tx_waiting_pre_load,
2449 .pre_save = virtio_net_tx_waiting_pre_save,
2450 .fields = (VMStateField[]) {
2451 VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
2452 curr_queues_1,
2453 vmstate_virtio_net_queue_tx_waiting,
2454 struct VirtIONetQueue),
2455 VMSTATE_END_OF_LIST()
2456 },
2457};
2458
2459/* the 'has_ufo' flag is just tested; if the incoming stream has the
2460 * flag set we need to check that we have it
2461 */
2462static int virtio_net_ufo_post_load(void *opaque, int version_id)
2463{
2464 struct VirtIONetMigTmp *tmp = opaque;
2465
2466 if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
2467 error_report("virtio-net: saved image requires TUN_F_UFO support");
2468 return -EINVAL;
2469 }
2470
2471 return 0;
2472}
2473
44b1ff31 2474static int virtio_net_ufo_pre_save(void *opaque)
982b78c5
DDAG
2475{
2476 struct VirtIONetMigTmp *tmp = opaque;
2477
2478 tmp->has_ufo = tmp->parent->has_ufo;
44b1ff31
DDAG
2479
2480 return 0;
982b78c5
DDAG
2481}
2482
2483static const VMStateDescription vmstate_virtio_net_has_ufo = {
2484 .name = "virtio-net-ufo",
2485 .post_load = virtio_net_ufo_post_load,
2486 .pre_save = virtio_net_ufo_pre_save,
2487 .fields = (VMStateField[]) {
2488 VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
2489 VMSTATE_END_OF_LIST()
2490 },
2491};
2492
2493/* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
2494 * flag set we need to check that we have it
2495 */
2496static int virtio_net_vnet_post_load(void *opaque, int version_id)
2497{
2498 struct VirtIONetMigTmp *tmp = opaque;
2499
2500 if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
2501 error_report("virtio-net: saved image requires vnet_hdr=on");
2502 return -EINVAL;
2503 }
2504
2505 return 0;
2506}
2507
44b1ff31 2508static int virtio_net_vnet_pre_save(void *opaque)
982b78c5
DDAG
2509{
2510 struct VirtIONetMigTmp *tmp = opaque;
2511
2512 tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;
44b1ff31
DDAG
2513
2514 return 0;
982b78c5
DDAG
2515}
2516
2517static const VMStateDescription vmstate_virtio_net_has_vnet = {
2518 .name = "virtio-net-vnet",
2519 .post_load = virtio_net_vnet_post_load,
2520 .pre_save = virtio_net_vnet_pre_save,
2521 .fields = (VMStateField[]) {
2522 VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
2523 VMSTATE_END_OF_LIST()
2524 },
2525};
2526
2527static const VMStateDescription vmstate_virtio_net_device = {
2528 .name = "virtio-net-device",
2529 .version_id = VIRTIO_NET_VM_VERSION,
2530 .minimum_version_id = VIRTIO_NET_VM_VERSION,
2531 .post_load = virtio_net_post_load_device,
2532 .fields = (VMStateField[]) {
2533 VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
2534 VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
2535 vmstate_virtio_net_queue_tx_waiting,
2536 VirtIONetQueue),
2537 VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
2538 VMSTATE_UINT16(status, VirtIONet),
2539 VMSTATE_UINT8(promisc, VirtIONet),
2540 VMSTATE_UINT8(allmulti, VirtIONet),
2541 VMSTATE_UINT32(mac_table.in_use, VirtIONet),
2542
2543 /* Guarded pair: If it fits we load it, else we throw it away
2544         * - can happen if the source has a larger MAC table; post-load
2545 * sets flags in this case.
2546 */
2547 VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
2548 0, mac_table_fits, mac_table.in_use,
2549 ETH_ALEN),
2550 VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
2551 mac_table.in_use, ETH_ALEN),
2552
2553 /* Note: This is an array of uint32's that's always been saved as a
2554 * buffer; hold onto your endiannesses; it's actually used as a bitmap
2555 * but based on the uint.
2556 */
2557 VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
2558 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
2559 vmstate_virtio_net_has_vnet),
2560 VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
2561 VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
2562 VMSTATE_UINT8(alluni, VirtIONet),
2563 VMSTATE_UINT8(nomulti, VirtIONet),
2564 VMSTATE_UINT8(nouni, VirtIONet),
2565 VMSTATE_UINT8(nobcast, VirtIONet),
2566 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
2567 vmstate_virtio_net_has_ufo),
2568 VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0,
2569 vmstate_info_uint16_equal, uint16_t),
2570 VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1),
2571 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
2572 vmstate_virtio_net_tx_waiting),
2573 VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
2574 has_ctrl_guest_offloads),
2575 VMSTATE_END_OF_LIST()
2576 },
2577};
2578
eb6b6c12 2579static NetClientInfo net_virtio_info = {
f394b2e2 2580 .type = NET_CLIENT_DRIVER_NIC,
eb6b6c12
MM
2581 .size = sizeof(NICState),
2582 .can_receive = virtio_net_can_receive,
2583 .receive = virtio_net_receive,
eb6b6c12 2584 .link_status_changed = virtio_net_set_link_status,
b1be4280 2585 .query_rx_filter = virtio_net_query_rxfilter,
b2c929f0 2586 .announce = virtio_net_announce,
eb6b6c12
MM
2587};
2588
f56a1247
MT
2589static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
2590{
17a0ca55 2591 VirtIONet *n = VIRTIO_NET(vdev);
fed699f9 2592 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
f56a1247 2593 assert(n->vhost_started);
ed8b4afe 2594 return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
f56a1247
MT
2595}
2596
2597static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
2598 bool mask)
2599{
17a0ca55 2600 VirtIONet *n = VIRTIO_NET(vdev);
fed699f9 2601 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
f56a1247 2602 assert(n->vhost_started);
ed8b4afe 2603 vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
f56a1247
MT
2604 vdev, idx, mask);
2605}
2606
019a3edb 2607static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
fbe78f4f 2608{
0cd09c3a 2609 virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
a93e599d 2610
ba550851
SG
2611 n->config_size = virtio_feature_get_config_size(feature_sizes,
2612 host_features);
17ec5a86
FK
2613}
2614
8a253ec2
FK
2615void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
2616 const char *type)
2617{
2618 /*
2619     * The name can be NULL; in that case the netclient name defaults to type.x.
2620 */
2621 assert(type != NULL);
2622
9e288406 2623 g_free(n->netclient_name);
9e288406 2624 g_free(n->netclient_type);
80e0090a 2625 n->netclient_name = g_strdup(name);
8a253ec2
FK
2626 n->netclient_type = g_strdup(type);
2627}
2628
e6f746b3 2629static void virtio_net_device_realize(DeviceState *dev, Error **errp)
17ec5a86 2630{
e6f746b3 2631 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
284a32f0 2632 VirtIONet *n = VIRTIO_NET(dev);
b1be4280 2633 NetClientState *nc;
284a32f0 2634 int i;
1773d9ee 2635
a93e599d 2636 if (n->net_conf.mtu) {
127833ee 2637 n->host_features |= (1ULL << VIRTIO_NET_F_MTU);
a93e599d
MC
2638 }
2639
9473939e
JB
2640 if (n->net_conf.duplex_str) {
2641 if (strncmp(n->net_conf.duplex_str, "half", 5) == 0) {
2642 n->net_conf.duplex = DUPLEX_HALF;
2643 } else if (strncmp(n->net_conf.duplex_str, "full", 5) == 0) {
2644 n->net_conf.duplex = DUPLEX_FULL;
2645 } else {
2646 error_setg(errp, "'duplex' must be 'half' or 'full'");
2647 }
2648 n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
2649 } else {
2650 n->net_conf.duplex = DUPLEX_UNKNOWN;
2651 }
2652
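    /* SPEED_UNKNOWN is -1: values below it are rejected, -1 leaves the
     * feature unadvertised, and any value >= 0 enables SPEED_DUPLEX. */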
2653 if (n->net_conf.speed < SPEED_UNKNOWN) {
2654 error_setg(errp, "'speed' must be between 0 and INT_MAX");
2655 } else if (n->net_conf.speed >= 0) {
2656 n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
2657 }
2658
da3e8a23 2659 virtio_net_set_config_size(n, n->host_features);
284a32f0 2660 virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
fbe78f4f 2661
1c0fbfa3
MT
2662 /*
2663 * We set a lower limit on RX queue size to what it always was.
2664 * Guests that want a smaller ring can always resize it without
2665 * help from us (using virtio 1 and up).
2666 */
2667 if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
2668 n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
5f997fd1 2669 !is_power_of_2(n->net_conf.rx_queue_size)) {
1c0fbfa3
MT
2670 error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
2671 "must be a power of 2 between %d and %d.",
2672 n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
2673 VIRTQUEUE_MAX_SIZE);
2674 virtio_cleanup(vdev);
2675 return;
2676 }
2677
9b02e161
WW
2678 if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
2679 n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
2680 !is_power_of_2(n->net_conf.tx_queue_size)) {
2681 error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
2682 "must be a power of 2 between %d and %d",
2683 n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
2684 VIRTQUEUE_MAX_SIZE);
2685 virtio_cleanup(vdev);
2686 return;
2687 }
2688
575a1c0e 2689 n->max_queues = MAX(n->nic_conf.peers.queues, 1);
87b3bd1c 2690 if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
7e0e736e 2691 error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
631b22ea 2692 "must be a positive integer less than %d.",
87b3bd1c 2693 n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
7e0e736e
JW
2694 virtio_cleanup(vdev);
2695 return;
2696 }
f6b26cf2 2697 n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
fed699f9 2698 n->curr_queues = 1;
1773d9ee 2699 n->tx_timeout = n->net_conf.txtimer;
a697a334 2700
1773d9ee
FK
2701 if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
2702 && strcmp(n->net_conf.tx, "bh")) {
0765691e
MA
2703 warn_report("virtio-net: "
2704 "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
2705 n->net_conf.tx);
2706 error_printf("Defaulting to \"bh\"");
a697a334
AW
2707 }
2708
2eef278b
MT
2709 n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n),
2710 n->net_conf.tx_queue_size);
9b02e161 2711
da51a335 2712 for (i = 0; i < n->max_queues; i++) {
f9d6dbf0 2713 virtio_net_add_queue(n, i);
a697a334 2714 }
da51a335 2715
17a0ca55 2716 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
1773d9ee
FK
2717 qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
2718 memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
554c97dd 2719 n->status = VIRTIO_NET_S_LINK_UP;
9d8c6a25
DDAG
2720 qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
2721 QEMU_CLOCK_VIRTUAL,
2722 virtio_net_announce_timer, n);
b2c929f0 2723 n->announce_timer.round = 0;
fbe78f4f 2724
8a253ec2
FK
2725 if (n->netclient_type) {
2726 /*
2727         * This happens when virtio_net_set_netclient_name has been called.
2728 */
2729 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
2730 n->netclient_type, n->netclient_name, n);
2731 } else {
2732 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
284a32f0 2733 object_get_typename(OBJECT(dev)), dev->id, n);
8a253ec2
FK
2734 }
2735
6e371ab8
MT
2736 peer_test_vnet_hdr(n);
2737 if (peer_has_vnet_hdr(n)) {
fed699f9 2738 for (i = 0; i < n->max_queues; i++) {
d6085e3a 2739 qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
fed699f9 2740 }
6e371ab8
MT
2741 n->host_hdr_len = sizeof(struct virtio_net_hdr);
2742 } else {
2743 n->host_hdr_len = 0;
2744 }
eb6b6c12 2745
1773d9ee 2746 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);
96d5e201 2747
fed699f9 2748 n->vqs[0].tx_waiting = 0;
1773d9ee 2749 n->tx_burst = n->net_conf.txburst;
bb9d17f8 2750 virtio_net_set_mrg_rx_bufs(n, 0, 0);
002437cd 2751 n->promisc = 1; /* for compatibility */
fbe78f4f 2752
7267c094 2753 n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
b6503ed9 2754
7267c094 2755 n->vlans = g_malloc0(MAX_VLAN >> 3);
f21c0ed9 2756
b1be4280
AK
2757 nc = qemu_get_queue(n->nic);
2758 nc->rxfilter_notify_enabled = 1;
2759
2974e916 2760 QTAILQ_INIT(&n->rsc_chains);
284a32f0 2761 n->qdev = dev;
17ec5a86
FK
2762}
2763
306ec6c3 2764static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
17ec5a86 2765{
306ec6c3
AF
2766 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
2767 VirtIONet *n = VIRTIO_NET(dev);
f9d6dbf0 2768 int i, max_queues;
17ec5a86
FK
2769
2770 /* This will stop vhost backend if appropriate. */
2771 virtio_net_set_status(vdev, 0);
2772
9e288406
MA
2773 g_free(n->netclient_name);
2774 n->netclient_name = NULL;
2775 g_free(n->netclient_type);
2776 n->netclient_type = NULL;
8a253ec2 2777
17ec5a86
FK
2778 g_free(n->mac_table.macs);
2779 g_free(n->vlans);
2780
f9d6dbf0
WC
2781 max_queues = n->multiqueue ? n->max_queues : 1;
2782 for (i = 0; i < max_queues; i++) {
2783 virtio_net_del_queue(n, i);
17ec5a86
FK
2784 }
2785
9d8c6a25 2786 qemu_announce_timer_del(&n->announce_timer);
17ec5a86
FK
2787 g_free(n->vqs);
2788 qemu_del_nic(n->nic);
2974e916 2789 virtio_net_rsc_cleanup(n);
6a1a8cc7 2790 virtio_cleanup(vdev);
17ec5a86
FK
2791}
2792
2793static void virtio_net_instance_init(Object *obj)
2794{
2795 VirtIONet *n = VIRTIO_NET(obj);
2796
2797 /*
2798 * The default config_size is sizeof(struct virtio_net_config).
2799         * Can be overridden with virtio_net_set_config_size.
2800 */
2801 n->config_size = sizeof(struct virtio_net_config);
aa4197c3
GA
2802 device_add_bootindex_property(obj, &n->nic_conf.bootindex,
2803 "bootindex", "/ethernet-phy@0",
2804 DEVICE(n), NULL);
17ec5a86
FK
2805}
2806
44b1ff31 2807static int virtio_net_pre_save(void *opaque)
4d45dcfb
HP
2808{
2809 VirtIONet *n = opaque;
2810
2811 /* At this point, backend must be stopped, otherwise
2812 * it might keep writing to memory. */
2813 assert(!n->vhost_started);
44b1ff31
DDAG
2814
2815 return 0;
4d45dcfb
HP
2816}
2817
2818static const VMStateDescription vmstate_virtio_net = {
2819 .name = "virtio-net",
2820 .minimum_version_id = VIRTIO_NET_VM_VERSION,
2821 .version_id = VIRTIO_NET_VM_VERSION,
2822 .fields = (VMStateField[]) {
2823 VMSTATE_VIRTIO_DEVICE,
2824 VMSTATE_END_OF_LIST()
2825 },
2826 .pre_save = virtio_net_pre_save,
2827};
290c2428 2828
17ec5a86 2829static Property virtio_net_properties[] = {
127833ee
JB
2830 DEFINE_PROP_BIT64("csum", VirtIONet, host_features,
2831 VIRTIO_NET_F_CSUM, true),
2832 DEFINE_PROP_BIT64("guest_csum", VirtIONet, host_features,
87108bb2 2833 VIRTIO_NET_F_GUEST_CSUM, true),
127833ee
JB
2834 DEFINE_PROP_BIT64("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
2835 DEFINE_PROP_BIT64("guest_tso4", VirtIONet, host_features,
87108bb2 2836 VIRTIO_NET_F_GUEST_TSO4, true),
127833ee 2837 DEFINE_PROP_BIT64("guest_tso6", VirtIONet, host_features,
87108bb2 2838 VIRTIO_NET_F_GUEST_TSO6, true),
127833ee 2839 DEFINE_PROP_BIT64("guest_ecn", VirtIONet, host_features,
87108bb2 2840 VIRTIO_NET_F_GUEST_ECN, true),
127833ee 2841 DEFINE_PROP_BIT64("guest_ufo", VirtIONet, host_features,
87108bb2 2842 VIRTIO_NET_F_GUEST_UFO, true),
127833ee 2843 DEFINE_PROP_BIT64("guest_announce", VirtIONet, host_features,
87108bb2 2844 VIRTIO_NET_F_GUEST_ANNOUNCE, true),
127833ee 2845 DEFINE_PROP_BIT64("host_tso4", VirtIONet, host_features,
87108bb2 2846 VIRTIO_NET_F_HOST_TSO4, true),
127833ee 2847 DEFINE_PROP_BIT64("host_tso6", VirtIONet, host_features,
87108bb2 2848 VIRTIO_NET_F_HOST_TSO6, true),
127833ee 2849 DEFINE_PROP_BIT64("host_ecn", VirtIONet, host_features,
87108bb2 2850 VIRTIO_NET_F_HOST_ECN, true),
127833ee 2851 DEFINE_PROP_BIT64("host_ufo", VirtIONet, host_features,
87108bb2 2852 VIRTIO_NET_F_HOST_UFO, true),
127833ee 2853 DEFINE_PROP_BIT64("mrg_rxbuf", VirtIONet, host_features,
87108bb2 2854 VIRTIO_NET_F_MRG_RXBUF, true),
127833ee 2855 DEFINE_PROP_BIT64("status", VirtIONet, host_features,
87108bb2 2856 VIRTIO_NET_F_STATUS, true),
127833ee 2857 DEFINE_PROP_BIT64("ctrl_vq", VirtIONet, host_features,
87108bb2 2858 VIRTIO_NET_F_CTRL_VQ, true),
127833ee 2859 DEFINE_PROP_BIT64("ctrl_rx", VirtIONet, host_features,
87108bb2 2860 VIRTIO_NET_F_CTRL_RX, true),
127833ee 2861 DEFINE_PROP_BIT64("ctrl_vlan", VirtIONet, host_features,
87108bb2 2862 VIRTIO_NET_F_CTRL_VLAN, true),
127833ee 2863 DEFINE_PROP_BIT64("ctrl_rx_extra", VirtIONet, host_features,
87108bb2 2864 VIRTIO_NET_F_CTRL_RX_EXTRA, true),
127833ee 2865 DEFINE_PROP_BIT64("ctrl_mac_addr", VirtIONet, host_features,
87108bb2 2866 VIRTIO_NET_F_CTRL_MAC_ADDR, true),
127833ee 2867 DEFINE_PROP_BIT64("ctrl_guest_offloads", VirtIONet, host_features,
87108bb2 2868 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
127833ee 2869 DEFINE_PROP_BIT64("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
2974e916
YB
2870 DEFINE_PROP_BIT64("guest_rsc_ext", VirtIONet, host_features,
2871 VIRTIO_NET_F_RSC_EXT, false),
2872 DEFINE_PROP_UINT32("rsc_interval", VirtIONet, rsc_timeout,
2873 VIRTIO_NET_RSC_DEFAULT_INTERVAL),
17ec5a86
FK
2874 DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
2875 DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
87108bb2 2876 TX_TIMER_INTERVAL),
17ec5a86
FK
2877 DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
2878 DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
1c0fbfa3
MT
2879 DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
2880 VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
9b02e161
WW
2881 DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
2882 VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
a93e599d 2883 DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
75ebec11
MC
2884 DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
2885 true),
9473939e
JB
2886 DEFINE_PROP_INT32("speed", VirtIONet, net_conf.speed, SPEED_UNKNOWN),
2887 DEFINE_PROP_STRING("duplex", VirtIONet, net_conf.duplex_str),
17ec5a86
FK
2888 DEFINE_PROP_END_OF_LIST(),
2889};
2890
2891static void virtio_net_class_init(ObjectClass *klass, void *data)
2892{
2893 DeviceClass *dc = DEVICE_CLASS(klass);
2894 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
e6f746b3 2895
17ec5a86 2896 dc->props = virtio_net_properties;
290c2428 2897 dc->vmsd = &vmstate_virtio_net;
125ee0ed 2898 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
e6f746b3 2899 vdc->realize = virtio_net_device_realize;
306ec6c3 2900 vdc->unrealize = virtio_net_device_unrealize;
17ec5a86
FK
2901 vdc->get_config = virtio_net_get_config;
2902 vdc->set_config = virtio_net_set_config;
2903 vdc->get_features = virtio_net_get_features;
2904 vdc->set_features = virtio_net_set_features;
2905 vdc->bad_features = virtio_net_bad_features;
2906 vdc->reset = virtio_net_reset;
2907 vdc->set_status = virtio_net_set_status;
2908 vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
2909 vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
2a083ffd 2910 vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
982b78c5 2911 vdc->vmsd = &vmstate_virtio_net_device;
17ec5a86
FK
2912}
2913
2914static const TypeInfo virtio_net_info = {
2915 .name = TYPE_VIRTIO_NET,
2916 .parent = TYPE_VIRTIO_DEVICE,
2917 .instance_size = sizeof(VirtIONet),
2918 .instance_init = virtio_net_instance_init,
2919 .class_init = virtio_net_class_init,
2920};
2921
2922static void virtio_register_types(void)
2923{
2924 type_register_static(&virtio_net_info);
2925}
2926
2927type_init(virtio_register_types)