Revert "gdbstub: Do not kill target in system emulation mode"
[qemu/qmp-unstable.git] / hw / net / virtio-net.c
blob3af6faf4c8a3714afd66599626b6626ab655ec5e
/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "qemu/iov.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/qmp/qjson.h"
#include "qapi-event.h"
#include "hw/virtio/virtio-access.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))
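/*
 * E.g. endof(struct virtio_net_config, mac) is offsetof(..., mac) plus
 * ETH_ALEN: the size of a config space that ends with the mac field.
 * feature_sizes[] below uses this so the config space grows only as far
 * as the negotiated features require.
 */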
typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;

static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {}
};

static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}

static int vq2q(int queue_index)
{
    return queue_index / 2;
}
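/*
 * Virtqueues come in RX/TX pairs: vq index 2*i is the RX queue of queue
 * pair i and 2*i + 1 is its TX queue, so vq2q() maps both back to the
 * pair index (vq2q(0) == vq2q(1) == 0).  The control virtqueue is
 * created after all the pairs (see virtio_net_set_multiqueue()).
 */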
/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;

    virtio_stw_p(vdev, &netcfg.status, n->status);
    virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, n->config_size);
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}

static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}

static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->announce_counter--;
    n->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}

static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!get_vhost_net(nc->peer)) {
        return;
    }

    if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
        !!n->vhost_started) {
        return;
    }
    if (!n->vhost_started) {
        int r, i;

        if (!vhost_net_query(get_vhost_net(nc->peer), vdev)) {
            return;
        }

        /* Any packets outstanding? Purge them to avoid touching rings
         * when vhost is running.
         */
        for (i = 0; i < queues; i++) {
            NetClientState *qnc = qemu_get_subqueue(n->nic, i);

            /* Purge both directions: TX and RX. */
            qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
            qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
        }

        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}

static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
        }
    }
}

static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(vdev);

    virtio_net_set_status(vdev, vdev->status);
}

static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
        qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
                                              n->netclient_name, path,
                                              &error_abort);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}

static intList *get_vlan_table(VirtIONet *n)
{
    intList *list, *entry;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                entry = g_malloc0(sizeof(*entry));
                entry->value = (i << 5) + j;
                entry->next = list;
                list = entry;
            }
        }
    }

    return list;
}
static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    RxFilterInfo *info;
    strList *str_list, *entry;
    int i;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    info->broadcast_allowed = n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = qemu_mac_strdup_printf(n->mac);

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;
    info->vlan_table = get_vlan_table(n);

    if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;
    timer_del(n->announce_timer);
    n->announce_counter = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);
}
static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}

static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    n->guest_hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}

static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queues(VirtIONet *n)
{
    int i;
    int r;

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            r = peer_attach(n, i);
            assert(!r);
        } else {
            r = peer_detach(n, i);
            assert(!r);
        }
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);

static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    /* Firstly sync all virtio-net possible supported features */
    features |= n->host_features;

    virtio_add_feature(&features, VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);

        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
    }

    if (!get_vhost_net(nc->peer)) {
        return features;
    }
    return vhost_net_get_features(get_vhost_net(nc->peer), features);
}
static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint32_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    virtio_add_feature(&features, VIRTIO_NET_F_MAC);
    virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}

static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}

static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}

static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    virtio_net_set_multiqueue(n,
                              __virtio_has_feature(features, VIRTIO_NET_F_MQ));

    virtio_net_set_mrg_rx_bufs(n,
                               __virtio_has_feature(features,
                                                    VIRTIO_NET_F_MRG_RXBUF));

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(get_vhost_net(nc->peer), features);
    }

    if (__virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
        memset(n->vlans, 0, MAX_VLAN >> 3);
    } else {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }
}
static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}
static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    first_multi = in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        multi_overflow = 1;
    }

    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}
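/*
 * The VLAN filter is a MAX_VLAN-bit (4096-bit) bitmap: vid >> 5 selects
 * one of the 128 32-bit words in n->vlans[] and vid & 0x1f the bit
 * within it.  get_vlan_table() above inverts the mapping with
 * (i << 5) + j.
 */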
static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t vid;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = virtio_lduw_p(vdev, &vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}
static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
        n->status & VIRTIO_NET_S_ANNOUNCE) {
        n->status &= ~VIRTIO_NET_S_ANNOUNCE;
        if (n->announce_counter) {
            timer_mod(n->announce_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      self_announce_delay(n->announce_counter));
        }
        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}
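/*
 * Each control-queue request carries a struct virtio_net_ctrl_hdr
 * ({ class, cmd }) plus command-specific data in its out descriptors,
 * and a single writable ack byte in the in descriptors that the device
 * sets to VIRTIO_NET_OK or VIRTIO_NET_ERR; the dispatcher below
 * validates exactly this layout before routing on ctrl.class.
 */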
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement elem;
    size_t s;
    struct iovec *iov, *iov2;
    unsigned int iov_cnt;

    while (virtqueue_pop(vq, &elem)) {
        if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
            iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
            error_report("virtio-net ctrl missing headers");
            exit(1);
        }

        iov_cnt = elem.out_num;
        iov2 = iov = g_memdup(elem.out_sg, sizeof(struct iovec) * elem.out_num);
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
            status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, &elem, sizeof(status));
        virtio_notify(vdev, vq);
        g_free(iov2);
    }
}
/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}

static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}

static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}

static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
{
    virtio_tswap16s(vdev, &hdr->hdr_len);
    virtio_tswap16s(vdev, &hdr->gso_size);
    virtio_tswap16s(vdev, &hdr->csum_start);
    virtio_tswap16s(vdev, &hdr->csum_offset);
}
/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) &&       /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}
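/*
 * The magic offsets above assume an untagged Ethernet frame with a
 * 20-byte IPv4 header (no IP options): bytes 12-13 hold the ethertype,
 * byte 23 the IPv4 protocol field (14 + 9), and bytes 34-35 the UDP
 * source port (14 + 20), which is 67 for a DHCP server (bootps).
 */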
static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);
        virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}

static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { // unicast
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}
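/*
 * With VIRTIO_NET_F_MRG_RXBUF negotiated, one packet may span several
 * descriptor chains: the receive loop below pops as many elements as
 * needed, and num_buffers in the first element's header is patched at
 * the end (via mhdr_sg/mhdr_cnt) once the final chain count is known.
 */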
static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        const struct iovec *sg = elem.in_sg;

        total = 0;

        if (virtqueue_pop(q->rx_vq, &elem) == 0) {
            if (i == 0)
                return -1;
            error_report("virtio-net unexpected empty queue: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd guest features 0x%x",
                         i, n->mergeable_rx_bufs, offset, size,
                         n->guest_hdr_len, n->host_hdr_len, vdev->guest_features);
            exit(1);
        }

        if (elem.in_num < 1) {
            error_report("virtio-net receive queue contains no in buffers");
            exit(1);
        }

        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem.in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem.in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem.in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
#if 0
            error_report("virtio-net truncated non-mergeable packet: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd",
                         i, n->mergeable_rx_bufs,
                         offset, size, n->guest_hdr_len, n->host_hdr_len);
#endif
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, &elem, total, i++);
    }

    if (mhdr_cnt) {
        virtio_stw_p(vdev, &mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}
static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
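/*
 * qemu_sendv_packet_async() returns 0 when the peer queued the packet
 * for later delivery; in that case virtio_net_flush_tx() parks the
 * in-flight element in q->async_tx and stops, and virtio_net_tx_complete()
 * below pushes it back and resumes flushing once the send finishes.
 */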
static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    q->async_tx.elem.out_num = q->async_tx.len = 0;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

/* TX */
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    if (q->async_tx.elem.out_num) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    while (virtqueue_pop(q->tx_vq, &elem)) {
        ssize_t ret, len;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        if (out_num < 1) {
            error_report("virtio-net header not in first element");
            exit(1);
        }

        if (n->has_vnet_hdr) {
            if (out_sg[0].iov_len < n->guest_hdr_len) {
                error_report("virtio-net header incorrect");
                exit(1);
            }
            virtio_net_hdr_swap(vdev, (void *) out_sg[0].iov_base);
        }

        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged.  Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        len = n->guest_hdr_len;

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            q->async_tx.len = len;
            return -EBUSY;
        }

        len += ret;

        virtqueue_push(q->tx_vq, &elem, 0);
        virtio_notify(vdev, q->tx_vq);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}
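/*
 * Two TX mitigation schemes follow.  With tx=timer, the first kick arms
 * q->tx_timer and disables further notifications, and the queue is
 * flushed when the timer fires tx_timeout nanoseconds later.  With
 * tx=bh (the default), a kick schedules a bottom half that flushes up
 * to tx_burst packets and reschedules itself while traffic keeps
 * arriving.
 */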
static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        timer_del(q->tx_timer);
        q->tx_waiting = 0;
        virtio_net_flush_tx(q);
    } else {
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}

static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}
static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY) {
        return; /* Notification re-enable handled by tx_complete */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    if (virtio_net_flush_tx(q) > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int i, max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;

    for (i = 2; i < n->max_queues * 2 + 1; i++) {
        virtio_del_queue(vdev, i);
    }

    for (i = 1; i < max; i++) {
        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
        if (n->vqs[i].tx_timer) {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
            n->vqs[i].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                              virtio_net_tx_timer,
                                              &n->vqs[i]);
        } else {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
        }

        n->vqs[i].tx_waiting = 0;
        n->vqs[i].n = n;
    }

    /* Note: Minix guests (version 3.2.1) use the ctrl vq but don't ack
     * VIRTIO_NET_F_CTRL_VQ.  Create the ctrl vq unconditionally to avoid
     * breaking them.
     */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);

    virtio_net_set_queues(n);
}
static void virtio_net_save(QEMUFile *f, void *opaque)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
    virtio_save(vdev, f);
}
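/*
 * Migration state is split between the generic virtio machinery and the
 * virtio-net payload: virtio_save() ends up calling the class save hook
 * registered below (virtio_net_save_device), and on load each field
 * group is gated on the stream's version_id, up to VIRTIO_NET_VM_VERSION.
 */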
static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->vqs[0].tx_waiting);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
    if (n->max_queues > 1) {
        qemu_put_be16(f, n->max_queues);
        qemu_put_be16(f, n->curr_queues);
        for (i = 1; i < n->curr_queues; i++) {
            qemu_put_be32(f, n->vqs[i].tx_waiting);
        }
    }

    if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        qemu_put_be64(f, n->curr_guest_offloads);
    }
}

static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
        return -EINVAL;

    return virtio_load(vdev, f, version_id);
}

static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
                                  int version_id)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i, link_down;

    qemu_get_buffer(f, n->mac, ETH_ALEN);
    n->vqs[0].tx_waiting = qemu_get_be32(f);

    virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f));

    if (version_id >= 3)
        n->status = qemu_get_be16(f);

    if (version_id >= 4) {
        if (version_id < 8) {
            n->promisc = qemu_get_be32(f);
            n->allmulti = qemu_get_be32(f);
        } else {
            n->promisc = qemu_get_byte(f);
            n->allmulti = qemu_get_byte(f);
        }
    }

    if (version_id >= 5) {
        n->mac_table.in_use = qemu_get_be32(f);
        /* MAC_TABLE_ENTRIES may be different from the saved image */
        if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
            qemu_get_buffer(f, n->mac_table.macs,
                            n->mac_table.in_use * ETH_ALEN);
        } else {
            int64_t i;

            /* Overflow detected - can happen if source has a larger MAC table.
             * We simply set overflow flag so there's no need to maintain the
             * table of addresses, discard them all.
             * Note: 64 bit math to avoid integer overflow.
             */
            for (i = 0; i < (int64_t)n->mac_table.in_use * ETH_ALEN; ++i) {
                qemu_get_byte(f);
            }
            n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
            n->mac_table.in_use = 0;
        }
    }

    if (version_id >= 6)
        qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);

    if (version_id >= 7) {
        if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
            error_report("virtio-net: saved image requires vnet_hdr=on");
            return -1;
        }
    }

    if (version_id >= 9) {
        n->mac_table.multi_overflow = qemu_get_byte(f);
        n->mac_table.uni_overflow = qemu_get_byte(f);
    }

    if (version_id >= 10) {
        n->alluni = qemu_get_byte(f);
        n->nomulti = qemu_get_byte(f);
        n->nouni = qemu_get_byte(f);
        n->nobcast = qemu_get_byte(f);
    }

    if (version_id >= 11) {
        if (qemu_get_byte(f) && !peer_has_ufo(n)) {
            error_report("virtio-net: saved image requires TUN_F_UFO support");
            return -1;
        }
    }

    if (n->max_queues > 1) {
        if (n->max_queues != qemu_get_be16(f)) {
            error_report("virtio-net: different max_queues ");
            return -1;
        }

        n->curr_queues = qemu_get_be16(f);
        if (n->curr_queues > n->max_queues) {
            error_report("virtio-net: curr_queues %x > max_queues %x",
                         n->curr_queues, n->max_queues);
            return -1;
        }
        for (i = 1; i < n->curr_queues; i++) {
            n->vqs[i].tx_waiting = qemu_get_be32(f);
        }
    }

    if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        n->curr_guest_offloads = qemu_get_be64(f);
    } else {
        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
    }

    if (peer_has_vnet_hdr(n)) {
        virtio_net_apply_guest_offloads(n);
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
        n->announce_counter = SELF_ANNOUNCE_ROUNDS;
        timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
    }

    return 0;
}
static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .link_status_changed = virtio_net_set_link_status,
    .query_rx_filter = virtio_net_query_rxfilter,
};

static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
}

static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
                             vdev, idx, mask);
}
static void virtio_net_set_config_size(VirtIONet *n, uint32_t host_features)
{
    int i, config_size = 0;
    virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    n->config_size = config_size;
}
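/*
 * Example: with VIRTIO_NET_F_MAC and VIRTIO_NET_F_STATUS set but
 * VIRTIO_NET_F_MQ clear, config_size ends at
 * endof(struct virtio_net_config, status), so the max_virtqueue_pairs
 * field is not exposed to the guest at all.
 */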
void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
                                   const char *type)
{
    /*
     * The name can be NULL, the netclient name will be type.x.
     */
    assert(type != NULL);

    g_free(n->netclient_name);
    g_free(n->netclient_type);
    n->netclient_name = g_strdup(name);
    n->netclient_type = g_strdup(type);
}
static void virtio_net_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *nc;
    int i;

    virtio_net_set_config_size(n, n->host_features);
    virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);

    n->max_queues = MAX(n->nic_conf.peers.queues, 1);
    if (n->max_queues * 2 + 1 > VIRTIO_PCI_QUEUE_MAX) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                   "must be a positive integer less than %d.",
                   n->max_queues, (VIRTIO_PCI_QUEUE_MAX - 1) / 2);
        virtio_cleanup(vdev);
        return;
    }
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->vqs[0].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
    n->curr_queues = 1;
    n->vqs[0].n = n;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     n->net_conf.tx);
        error_report("Defaulting to \"bh\"");
    }

    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_timer);
        n->vqs[0].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, virtio_net_tx_timer,
                                          &n->vqs[0]);
    } else {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_bh);
        n->vqs[0].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[0]);
    }
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;
    n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                     virtio_net_announce_timer, n);

    if (n->netclient_type) {
        /*
         * Happens when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(dev)), dev->id, n);
    }

    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    n->qdev = dev;
    register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
                    virtio_net_save, virtio_net_load, n);
}
static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    int i;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    unregister_savevm(dev, "virtio-net", n);

    g_free(n->netclient_name);
    n->netclient_name = NULL;
    g_free(n->netclient_type);
    n->netclient_type = NULL;

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    for (i = 0; i < n->max_queues; i++) {
        VirtIONetQueue *q = &n->vqs[i];
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        qemu_purge_queued_packets(nc);

        if (q->tx_timer) {
            timer_del(q->tx_timer);
            timer_free(q->tx_timer);
        } else if (q->tx_bh) {
            qemu_bh_delete(q->tx_bh);
        }
    }

    timer_del(n->announce_timer);
    timer_free(n->announce_timer);
    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);
}
static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
    device_add_bootindex_property(obj, &n->nic_conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(n), NULL);
}
static Property virtio_net_properties[] = {
    DEFINE_VIRTIO_NET_FEATURES(VirtIONet, host_features),
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_net_properties;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->realize = virtio_net_device_realize;
    vdc->unrealize = virtio_net_device_unrealize;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
    vdc->load = virtio_net_load_device;
    vdc->save = virtio_net_save_device;
}

static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)